
segfault for code similar to sampleOnnxMNIST.cpp

Open liguang-ops opened this issue 3 years ago • 3 comments

Description

Environment

TensorRT Version: 8.2
NVIDIA GPU: GTX 1660
NVIDIA Driver Version: 11.1
CUDA Version: 10.2
CUDNN Version: 8.2.1
Operating System: Win10
Python Version (if applicable):
Tensorflow Version (if applicable):
PyTorch Version (if applicable): 1.8
Baremetal or Container (if so, version):

Relevant Files

Here are all my files, including CMakeLists.txt and the ONNX file. You can run it right away.

Steps To Reproduce

I based my code on sampleOnnxMNIST under samples in the TensorRT package. I deleted some unused files and kept only buffers.h and common.h. But when I compile and run, I get a segfault in

    IExecutionContext* createExecutionContext() noexcept
    {
        return mImpl->createExecutionContext();
    }

which is called by

auto context = trt_unique_ptr<nvinfer1::IExecutionContext>(mEngine->createExecutionContext());

I have fought with it for two days, but have no idea what is wrong.

Here is the main code:

#include "buffers.h"
#include "NvInfer.h"
#include "NvOnnxConfig.h"
#include "NvOnnxParser.h"
#include <opencv2/opencv.hpp>
#include <cuda_runtime_api.h>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <vector>
#include <string>

struct OnnxParams
{
    std::string onnxFileName;     // path to the ONNX model
    std::string inputTensorName;  // name of the input binding
    std::string outputTensorName; // name of the output binding
    bool fp16;                    // build with FP16 precision
    int dlaCore;                  // DLA core to use, -1 to disable
};


class OnnxTrt
{
public:
    template<typename T>
    using trt_unique_ptr = std::unique_ptr<T, trtcommon::InferDeleter>;

    OnnxTrt(const OnnxParams& params)
        : mParams(params)
        , mEngine(nullptr)
        , mBufferMgr(nullptr)
    {}

    bool build(int logMode);

    bool infer(cv::Mat& imageIn, cv::Mat& imageOut);
    
private:
    bool mIsSerialized{false}; 
    size_t mSerializedSize{0}; 
    const std::string mSerializedName{"test.trt"};

    OnnxParams mParams; 

    nvinfer1::Dims mInputDims; 
    nvinfer1::Dims mOutputDims;

    std::shared_ptr<nvinfer1::ICudaEngine> mEngine; 
    std::shared_ptr<trtcommon::BufferManager> mBufferMgr;  

    bool constructNetwork(trt_unique_ptr<nvinfer1::IBuilder>& builder,
        trt_unique_ptr<nvinfer1::INetworkDefinition>& network, trt_unique_ptr<nvinfer1::IBuilderConfig>& config,
        trt_unique_ptr<nvonnxparser::IParser>& parser);

    bool preProcess(cv::Mat& imageIn);

    bool postProcess(cv::Mat& imageOut);

};


bool OnnxTrt::build(int logMode)
{
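    // Flow: create a runtime; if a cached serialized engine (test.trt) exists on disk,
    // deserialize it; otherwise parse the ONNX model, build a plan, and cache it to disk.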
    // NB: createInferRuntime keeps a reference to the logger, so the logger must
    // outlive the runtime and everything created from it
    trtcommon::Logger glogger(logMode);
    trt_unique_ptr<nvinfer1::IRuntime> runtime{nvinfer1::createInferRuntime(glogger)};
    if (!runtime)
    {
        return false;
    }

    
    std::ifstream inFileStream(mSerializedName, std::ios::in|std::ios::binary);
    if (!inFileStream.is_open())
    {
        std::cout << "the onnx model has not been serialized, serialing now ..." << std::endl;

        auto builder = trt_unique_ptr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(glogger));
        if (!builder)
        {
            return false;
        }

        
        const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
        auto network = trt_unique_ptr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(explicitBatch));
        if (!network)
        {
            return false;
        }

        
        auto config = trt_unique_ptr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
        if (!config)
        {
            return false;
        }

        
        auto parser
            = trt_unique_ptr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, glogger));
        if (!parser)
        {
            return false;
        }

        
        auto constructed = constructNetwork(builder, network, config, parser);
        if (!constructed)
        {
            return false;
        }

       
        auto profileStream = trtcommon::makeCudaStream();
        if (!profileStream)
        {
            return false;
        }
        config->setProfileStream(*profileStream);

        // buildSerializedNetwork builds the plan directly in host memory (TensorRT 8.x API)
        trt_unique_ptr<nvinfer1::IHostMemory> plan{builder->buildSerializedNetwork(*network, *config)};
        if (!plan)
        {
            return false;
        }

        // Deserialize straight from the in-memory plan. Note that `runtime` is local
        // to build(); an IRuntime is generally expected to outlive engines it deserializes.
        mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
            runtime->deserializeCudaEngine(plan->data(), plan->size()), trtcommon::InferDeleter());


        std::ofstream outFileStream(mSerializedName.c_str(), std::ios::out|std::ios::binary);
        if (!outFileStream.is_open())
        {
            std::cerr << "failed to open the file for writing the serialized model" << std::endl;
            return false;  // was EXIT_FAILURE, which converts to true in a bool function
        }

        mSerializedSize = plan->size();
        std::cout << "serialized plan size: " << mSerializedSize << " bytes" << std::endl;
        outFileStream.write(static_cast<const char*>(plan->data()), mSerializedSize);
        outFileStream.close();

        mIsSerialized = true;
    }
    else
    {
        inFileStream.seekg(0, inFileStream.end);
        mSerializedSize = inFileStream.tellg();  // assign the member; the original shadowed it with a local
        inFileStream.clear();
        inFileStream.seekg(0, inFileStream.beg);

        std::vector<char> modelData(mSerializedSize);
        inFileStream.read(modelData.data(), mSerializedSize);

        mEngine = std::shared_ptr<nvinfer1::ICudaEngine>(
            runtime->deserializeCudaEngine(modelData.data(), mSerializedSize), trtcommon::InferDeleter());
        inFileStream.close();
    }
    
    if (!mEngine)
    {
        return false;
    }

    mBufferMgr = std::make_shared<trtcommon::BufferManager>(mEngine);

    mInputDims = mEngine->getBindingDimensions(0);   // binding 0: input tensor
    ASSERT(mInputDims.nbDims == 5);
    mOutputDims = mEngine->getBindingDimensions(1);  // binding 1: output tensor
    ASSERT(mOutputDims.nbDims == 5);

    return true;
}

bool OnnxTrt::constructNetwork(trt_unique_ptr<nvinfer1::IBuilder>& builder,
    trt_unique_ptr<nvinfer1::INetworkDefinition>& network, trt_unique_ptr<nvinfer1::IBuilderConfig>& config,
    trt_unique_ptr<nvonnxparser::IParser>& parser)
{
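    // Parse the ONNX file straight into the network definition; dump parser errors on failure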
    auto parsed = parser->parseFromFile(mParams.onnxFileName.c_str(),
        static_cast<int>(nvinfer1::ILogger::Severity::kWARNING));
    if (!parsed)
    {
        for(int32_t i = 0; i< parser->getNbErrors(); ++i)
        {
            std::cout << parser->getError(i)->desc() << std::endl;
        }
        return false;
    }

    config->setMaxWorkspaceSize(1U << 26);  // 64 MiB workspace
    if (mParams.fp16)
    {
        config->setFlag(nvinfer1::BuilderFlag::kFP16);
    }

    trtcommon::enableDLA(builder.get(), config.get(), mParams.dlaCore); 

    return true;
}


bool OnnxTrt::infer(cv::Mat& imageIn, cv::Mat& imageOut)
{
    preProcess(imageIn);

    /* const signed short max_HU = ;
     * const signed short min_HU = ;
     */
    auto context = trt_unique_ptr<nvinfer1::IExecutionContext>(mEngine->createExecutionContext());
    if (!context)
    {
        return false;
    }

    // Memcpy from host input buffers to device input buffers
    mBufferMgr->copyInputToDevice();

    bool status = context->executeV2(mBufferMgr->getDeviceBindings().data());
    if (!status)
    {
        return false;
    }

    // Memcpy from device output buffers to host output buffers
    mBufferMgr->copyOutputToHost();

    postProcess(imageOut);
    return true;
}

bool OnnxTrt::preProcess(cv::Mat& imageIn)
{
    // The bindings are 5-D (assumed N, C, D, H, W), so the spatial sizes start at index 2
    const int inputD = mInputDims.d[2];
    const int inputH = mInputDims.d[3];
    const int inputW = mInputDims.d[4];

    float* hostDataBuffer = static_cast<float*>(mBufferMgr->getHostBuffer(mParams.inputTensorName));

    // Copy the float volume from the cv::Mat into the host staging buffer
    const float* matBuffer = reinterpret_cast<const float*>(imageIn.data);
    for (int i = 0; i < inputH * inputW * inputD; i++)
    {
        hostDataBuffer[i] = matBuffer[i];
    }
    return true;
}

bool OnnxTrt::postProcess(cv::Mat& imageOut)
{
    const int OutputD = mOutputDims.d[2];
    const int OutputH = mOutputDims.d[3];
    const int OutputW = mOutputDims.d[4];

    // cv::MatSize expects a pointer whose element [-1] holds the dimension count,
    // so constructing it from a plain int[3] reads out of bounds; compare dims directly
    ASSERT(imageOut.dims == 3 && imageOut.size[0] == OutputD
        && imageOut.size[1] == OutputH && imageOut.size[2] == OutputW);

    float* hostDataBuffer = static_cast<float*>(mBufferMgr->getHostBuffer(mParams.outputTensorName));

    // Copy the network output from the host staging buffer into the caller's cv::Mat
    float* matBuffer = reinterpret_cast<float*>(imageOut.data);
    for (int i = 0; i < OutputH * OutputW * OutputD; i++)
    {
        matBuffer[i] = hostDataBuffer[i];
    }
    return true;
}

int main()
{
    OnnxParams params = {"../../WGAN_1_25_50_50.onnx",
                         "modelInput",
                         "modelOutput",
                         false,
                         -1};

    OnnxTrt CTEnhance(params);
    if (!CTEnhance.build(3))
    {
        std::cerr << "build failed" << std::endl;
        return EXIT_FAILURE;
    }
    std::cout << "build success" << std::endl;

    int samples_size[3] = {25, 50, 50};

    cv::Mat imageIn(3, samples_size, CV_32F);
    cv::randn(imageIn, cv::Scalar(0.0), cv::Scalar(1.0));

    cv::Mat imageOut = cv::Mat::ones(3, samples_size, CV_32F);

    CTEnhance.infer(imageIn, imageOut);

    std::cout << imageOut << std::endl;
    return 0;
}
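
For completeness, here is the smallest snippet I can think of that exercises only the deserialize-and-create-context path. It is a sketch, not my full program: it assumes the test.trt produced by build() above and the Logger class from my common.h, and it keeps the IRuntime alive while the engine is used.

#include "NvInfer.h"
#include "common.h"
#include <fstream>
#include <iostream>
#include <memory>
#include <vector>

int main()
{
    trtcommon::Logger logger(3);

    // Read the serialized plan from disk
    std::ifstream file("test.trt", std::ios::binary | std::ios::ate);
    if (!file.is_open())
    {
        std::cerr << "cannot open test.trt" << std::endl;
        return 1;
    }
    std::vector<char> plan(file.tellg());
    file.seekg(0, file.beg);
    file.read(plan.data(), plan.size());

    // Keep the runtime alive for as long as the engine is used
    std::unique_ptr<nvinfer1::IRuntime> runtime{nvinfer1::createInferRuntime(logger)};
    std::unique_ptr<nvinfer1::ICudaEngine> engine{
        runtime->deserializeCudaEngine(plan.data(), plan.size())};
    if (!engine)
    {
        std::cerr << "deserialization failed" << std::endl;
        return 1;
    }

    std::unique_ptr<nvinfer1::IExecutionContext> context{engine->createExecutionContext()};
    std::cout << "context: " << static_cast<void*>(context.get()) << std::endl;
    return 0;
}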

liguang-ops avatar Aug 02 '22 06:08 liguang-ops

Here is the error output

Exception thrown: W32/0xC0000005
Unhandled exception at 0x00007FFFCD7D4686 (nvinfer.dll) in infertrt.exe: 0xC0000005: Access violation reading location 0xFFFFFFFFFFFFFFFF.

liguang-ops avatar Aug 02 '22 06:08 liguang-ops

Our sampleMnist sample should work, right? I think it's an issue in your code; you can debug it with gdb or add some prints.
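
For example, something like this (a rough sketch using the names from your code) can localize whether the engine pointer or the context creation is bad:

// Rough print-debugging sketch just before the crashing call
std::cout << "engine: " << static_cast<void*>(mEngine.get())
          << ", bindings: " << mEngine->getNbBindings() << std::endl;
nvinfer1::IExecutionContext* rawContext = mEngine->createExecutionContext();
std::cout << "context: " << static_cast<void*>(rawContext) << std::endl;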

zerollzeng avatar Aug 03 '22 02:08 zerollzeng

Our sampleMnist sample should work, right? I think it's an issue in your code; you can debug it with gdb or add some prints.

Yes, the example works fine. Printing runtime info didn't help, and my platform is Windows, so there is no gdb. I will try some other ideas. Thanks.

liguang-ops avatar Aug 03 '22 02:08 liguang-ops

Closing since there has been no activity for more than 14 days. Please reopen if you still have questions, thanks!

ttyio avatar Dec 12 '22 07:12 ttyio

Our sampleMnist sample should work, right? I think it's an issue in your code; you can debug it with gdb or add some prints.

Yes, the example works fine. Printing runtime info didn't help, and my platform is Windows, so there is no gdb. I will try some other ideas. Thanks.

Have you solved it? I am facing the same problem and have no idea how to solve it.

heijieke avatar May 04 '23 10:05 heijieke