torch2trt

AttributeError: 'NoneType' object has no attribute 'create_execution_context'

Open shubhamgajbhiye1994 opened this issue 3 years ago • 2 comments

```
Beginning ONNX file parsing
Completed parsing of ONNX file
Building an engine...
[TensorRT] ERROR: Network must have at least one output
[TensorRT] ERROR: Network validation failed.
Traceback (most recent call last):
  File "trt_inference.py", line 76, in <module>
    main()
  File "trt_inference.py", line 44, in main
    engine, context = build_engine(ONNX_FILE_PATH)
  File "trt_inference.py", line 37, in build_engine
    context = engine.create_execution_context()
AttributeError: 'NoneType' object has no attribute 'create_execution_context'
```
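The `AttributeError` is a downstream symptom: `builder.build_cuda_engine(network)` returned `None` because network validation failed ("Network must have at least one output"), which usually means the ONNX parse did not actually succeed even though the script printed "Completed parsing". Below is a minimal diagnostic sketch (not part of the original script, assuming the same pre-TensorRT-8 Python API used in the traceback) that checks the parser's return value and prints its errors instead of failing later at `create_execution_context()`:

```python
# Hedged sketch: surface ONNX parser errors before trying to build an engine.
import tensorrt as trt

TRT_LOGGER = trt.Logger()

def parse_onnx(onnx_file_path):
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network()
    parser = trt.OnnxParser(network, TRT_LOGGER)

    with open(onnx_file_path, 'rb') as model:
        ok = parser.parse(model.read())  # returns False on failure

    if not ok:
        # print every recorded parser error so the real problem is visible
        for i in range(parser.num_errors):
            print(parser.get_error(i))
        return None, None
    return builder, network
```

`parser.parse()` returns `False` on failure; the script below ignores that return value, so the build proceeds with an empty network and only fails once the engine comes back as `None`.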

-------- Script ----------------

```python
import numpy as np
import torch
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit

ONNX_FILE_PATH = "ssd.onnx"

# logger to capture errors, warnings, and other information during the build and inference phases
TRT_LOGGER = trt.Logger()

def build_engine(onnx_file_path):
    # initialize TensorRT engine and parse ONNX model
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network()
    parser = trt.OnnxParser(network, TRT_LOGGER)

    # allow TensorRT to use up to 1GB of GPU memory for tactic selection
    builder.max_workspace_size = 1 << 30
    # we have only one image in batch
    builder.max_batch_size = 1
    # use FP16 mode if possible
    if builder.platform_has_fast_fp16:
        builder.fp16_mode = True

    # parse ONNX
    with open(onnx_file_path, 'rb') as model:
        print('Beginning ONNX file parsing')
        parser.parse(model.read())
    print('Completed parsing of ONNX file')

    # generate TensorRT engine optimized for the target platform
    print('Building an engine...')
    engine = builder.build_cuda_engine(network)
    context = engine.create_execution_context()
    print("Completed creating Engine")
    return engine, context

def main():
    # initialize TensorRT engine and parse ONNX model
    engine, context = build_engine(ONNX_FILE_PATH)

    # get sizes of input and output and allocate memory required for input data and for output data
    for binding in engine:
        if engine.binding_is_input(binding):  # we expect only one input
            input_shape = engine.get_binding_shape(binding)
            input_size = trt.volume(input_shape) * engine.max_batch_size * np.dtype(np.float32).itemsize  # in bytes
            device_input = cuda.mem_alloc(input_size)
        else:  # and one output
            output_shape = engine.get_binding_shape(binding)
            # create page-locked memory buffers (i.e. won't be swapped to disk)
            host_output = cuda.pagelocked_empty(trt.volume(output_shape) * engine.max_batch_size, dtype=np.float32)
            device_output = cuda.mem_alloc(host_output.nbytes)

    # create a stream in which to copy inputs/outputs and run inference
    stream = cuda.Stream()

    # preprocess input data (preprocess_image is defined elsewhere in the script, not included in the issue)
    host_input = np.array(preprocess_image("000005.jpg").numpy(), dtype=np.float32, order='C')
    cuda.memcpy_htod_async(device_input, host_input, stream)

    # run inference
    context.execute_async(bindings=[int(device_input), int(device_output)], stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(host_output, device_output, stream)
    stream.synchronize()

    # postprocess results
    output_data = torch.Tensor(host_output).reshape(engine.max_batch_size, output_shape[0])
    # postprocess(output_data)

if __name__ == '__main__':
    main()
```
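For reference, a hedged sketch of two guards that would have caught this earlier inside `build_engine`: mark an output explicitly if the parser registered none, and verify the engine is not `None` before creating an execution context. It reuses the `builder` and `network` names from the script above and targets the same deprecated implicit-batch API; it is a sketch, not a confirmed fix for this particular SSD model:

```python
# Hedged sketch: guard rails for build_engine() above (TensorRT 7.x-era API).
if network.num_outputs == 0:
    # if the ONNX parser left no outputs, mark the last layer's output tensor
    last_layer = network.get_layer(network.num_layers - 1)
    network.mark_output(last_layer.get_output(0))

engine = builder.build_cuda_engine(network)
if engine is None:
    # build_cuda_engine returns None on failure instead of raising
    raise RuntimeError("Engine build failed; see the TensorRT log messages above")
context = engine.create_execution_context()
```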

shubhamgajbhiye1994 · May 03 '21 18:05

@shubhamgajbhiye1994 Hello, have you solved this problem?

wang-TJ-20 · Apr 25 '22 06:04

@shubhamgajbhiye1994 Hello, have you solved this issue?

wang-TJ-20 · Apr 25 '22 06:04