Converting a Swin-backbone Faster R-CNN to TensorRT fails

rylynchen opened this issue on Aug 18, 2022

I am converting a Faster R-CNN whose backbone is a Swin Transformer. Config:

find_unused_parameters=True
pretrained="https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth"

model = dict(
    type='FasterRCNN',
    backbone=dict(
        type='SwinTransformer',
        embed_dims=192,
        depths=[2, 2, 18, 2],
        num_heads=[ 6, 12, 24, 48 ],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    neck=dict(
        type='FPN',
        in_channels=[192, 384, 768, 1536],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=100,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        # soft-nms is also supported for rcnn testing
        # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
    ))

# img_norm_cfg = dict(
#     mean=[128, 128, 128], std=[128.0, 128.0, 128.0], to_rgb=True)
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    # dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    # dict(type='Resize', img_scale=(448, 448), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        # img_scale=(448, 448),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

dataset_type = "VehicleDataset"

data_root = "xxx"
train_path_prefix = [
    "xxx",
]
val_path_prefix = [
    "xxx"
]
test_path_prefix = [
    "xxx",
]

img_prefix = "image"
ann_prefix = "label"

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=40,
    train=dict(
        type=dataset_type,
        data_root = data_root,
        path_prefix=train_path_prefix,
        img_prefix=img_prefix,
        ann_prefix=ann_prefix,
        pipeline=train_pipeline
    ),
    val=dict(
        type=dataset_type,
        data_root = data_root,
        path_prefix=val_path_prefix,
        img_prefix=img_prefix,
        ann_prefix=ann_prefix,
        pipeline=test_pipeline
    ),
    test=dict(
        type=dataset_type,
        data_root = data_root,
        path_prefix=test_path_prefix,
        img_prefix=img_prefix,
        ann_prefix=ann_prefix,
        pipeline=test_pipeline
    ),
)

evaluation = dict(interval=1, metric=["mAP"])
# optimizer
# optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001)

optimizer = dict(
    type='AdamW',
    lr=0.0001,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))
runner = dict(type='EpochBasedRunner', max_epochs=32)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[27, 33])

checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=10,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook', log_dir='xxx'),
    ])
# yapf:enable
# custom_hooks = [dict(type='NumClassCheckHook')]

dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]

Deploy command:

python tools/deploy.py \
    configs/mmdet/detection/detection_tensorrt_static-800x1333.py \
    /code3/projects/detection/config/vehicle/fasterrcnn_swin_config.py \
    /xxx/vehicle_swin.pth \
    /root/mmdetection/demo/demo.jpg \
    --work-dir work_dir/vehicle_swin \
    --show \
    --device cuda:0

Got this error:

root@4e713889a71f:~/mmdeploy# python tools/deploy.py \
>     configs/mmdet/detection/detection_tensorrt_static-800x1333.py \
>     /code3/projects/detection/config/vehicle/fasterrcnn_swin_config.py \
>     /xxx/vehicle_swin.pth \
>     /root/mmdetection/demo/demo.jpg \
>     --work-dir work_dir/vehicle_swin \
>     --show \
>     --device cuda:0
[2022-08-18 07:16:26.841] [mmdeploy] [info] [model.cpp:95] Register 'DirectoryModel'
[2022-08-18 07:16:27.968] [mmdeploy] [info] [model.cpp:95] Register 'DirectoryModel'
[2022-08-18 07:16:29.097] [mmdeploy] [info] [model.cpp:95] Register 'DirectoryModel'
2022-08-18 07:16:29,101 - mmdeploy - INFO - Start pipeline mmdeploy.apis.pytorch2onnx.torch2onnx in subprocess
load checkpoint from local path: /nas/liuchen/model/cloud/vehicle_swin_map82_36016bce.pth
/code3/mmdet/datasets/utils.py:66: UserWarning: "ImageToTensor" pipeline is replaced by "DefaultFormatBundle" for batch inference. It is recommended to manually replace it in the test data pipeline in your config file.
  warnings.warn(
2022-08-18 07:16:33,336 - mmdeploy - WARNING - DeprecationWarning: get_onnx_config will be deprecated in the future.
2022-08-18 07:16:33,336 - mmdeploy - INFO - Export PyTorch model to ONNX: work_dir/vehicle_swin/end2end.onnx.
2022-08-18 07:16:33,369 - mmdeploy - WARNING - Can not find torch._C._jit_pass_onnx_deduplicate_initializers, function rewrite will not be applied
/root/mmdeploy/mmdeploy/core/optimizers/function_marker.py:158: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  ys_shape = tuple(int(s) for s in ys.shape)
/root/mmdeploy/mmdeploy/codebase/mmdet/models/detectors/base.py:24: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results).
  img_shape = [int(val) for val in img_shape]
/root/mmdeploy/mmdeploy/codebase/mmdet/models/detectors/base.py:24: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  img_shape = [int(val) for val in img_shape]
/code3/mmdet/models/utils/transformer.py:113: TracerWarning: Converting a tensor to a Python float might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  output_h = math.ceil(input_h / stride_h)
/code3/mmdet/models/utils/transformer.py:114: TracerWarning: Converting a tensor to a Python float might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  output_w = math.ceil(input_w / stride_w)
/code3/mmdet/models/utils/transformer.py:115: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  pad_h = max((output_h - 1) * stride_h +
/code3/mmdet/models/utils/transformer.py:117: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  pad_w = max((output_w - 1) * stride_w +
/code3/mmdet/models/utils/transformer.py:123: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  if pad_h > 0 or pad_w > 0:
/root/mmdeploy/mmdeploy/codebase/mmdet/models/backbones.py:188: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  assert L == H * W, 'input feature has wrong size'
/root/mmdeploy/mmdeploy/codebase/mmdet/models/backbones.py:201: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
  slice_h = (H + self.window_size - 1) // self.window_size * self.window_size
/root/mmdeploy/mmdeploy/codebase/mmdet/models/backbones.py:202: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
  slice_w = (W + self.window_size - 1) // self.window_size * self.window_size
/root/mmdeploy/mmdeploy/codebase/mmdet/models/backbones.py:145: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  B = int(windows.shape[0] / (H * W / window_size / window_size))
/root/mmdeploy/mmdeploy/codebase/mmdet/models/transformer.py:27: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  assert L == H * W, 'input feature has wrong size'
/root/mmdeploy/mmdeploy/codebase/mmdet/models/transformer.py:28: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!
  assert H % 2 == 0 and W % 2 == 0, f'x size ({H}*{W}) are not even.'
/root/mmdeploy/mmdeploy/codebase/mmdet/models/transformer.py:42: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
  out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] *
/root/mmdeploy/mmdeploy/codebase/mmdet/models/transformer.py:45: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
  out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] *
Process Process-2:
Traceback (most recent call last):
  File "/opt/conda/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
    self.run()
  File "/opt/conda/lib/python3.8/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/root/mmdeploy/mmdeploy/apis/core/pipeline_manager.py", line 107, in __call__
    ret = func(*args, **kwargs)
  File "/root/mmdeploy/mmdeploy/apis/pytorch2onnx.py", line 92, in torch2onnx
    export(
  File "/root/mmdeploy/mmdeploy/apis/core/pipeline_manager.py", line 356, in _wrap
    return self.call_function(func_name_, *args, **kwargs)
  File "/root/mmdeploy/mmdeploy/apis/core/pipeline_manager.py", line 326, in call_function
    return self.call_function_local(func_name, *args, **kwargs)
  File "/root/mmdeploy/mmdeploy/apis/core/pipeline_manager.py", line 275, in call_function_local
    return pipe_caller(*args, **kwargs)
  File "/root/mmdeploy/mmdeploy/apis/core/pipeline_manager.py", line 107, in __call__
    ret = func(*args, **kwargs)
  File "/root/mmdeploy/mmdeploy/apis/onnx/export.py", line 122, in export
    torch.onnx.export(
  File "/opt/conda/lib/python3.8/site-packages/torch/onnx/__init__.py", line 305, in export
    return utils.export(model, args, f, export_params, verbose, training,
  File "/opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py", line 118, in export
    _export(model, args, f, export_params, verbose, training, input_names, output_names,
  File "/opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py", line 719, in _export
    _model_to_graph(model, args, verbose, input_names,
  File "/root/mmdeploy/mmdeploy/core/rewriters/rewriter_utils.py", line 379, in wrapper
    return self.func(self, *args, **kwargs)
  File "/root/mmdeploy/mmdeploy/apis/onnx/optimizer.py", line 10, in model_to_graph__custom_optimizer
    graph, params_dict, torch_out = ctx.origin_func(*args, **kwargs)
  File "/opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py", line 499, in _model_to_graph
    graph, params, torch_out, module = _create_jit_graph(model, args)
  File "/opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py", line 440, in _create_jit_graph
    graph, torch_out = _trace_and_get_graph_from_model(model, args)
  File "/opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py", line 391, in _trace_and_get_graph_from_model
    torch.jit._get_trace_graph(model, args, strict=False, _force_outplace=False, _return_inputs_states=True)
  File "/opt/conda/lib/python3.8/site-packages/torch/jit/_trace.py", line 1166, in _get_trace_graph
    outs = ONNXTracedModule(f, strict, _force_outplace, return_inputs, _return_inputs_states)(*args, **kwargs)
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/opt/conda/lib/python3.8/site-packages/torch/jit/_trace.py", line 127, in forward
    graph, out = torch._C._create_graph_by_tracing(
  File "/opt/conda/lib/python3.8/site-packages/torch/jit/_trace.py", line 118, in wrapper
    outs.append(self.inner(*trace_inputs))
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1098, in _slow_forward
    result = self.forward(*input, **kwargs)
  File "/root/mmdeploy/mmdeploy/core/rewriters/rewriter_utils.py", line 379, in wrapper
    return self.func(self, *args, **kwargs)
  File "/root/mmdeploy/mmdeploy/codebase/mmdet/models/detectors/base.py", line 70, in base_detector__forward
    return __forward_impl(ctx, self, img, img_metas=img_metas, **kwargs)
  File "/root/mmdeploy/mmdeploy/core/optimizers/function_marker.py", line 261, in g
    rets = f(*args, **kwargs)
  File "/root/mmdeploy/mmdeploy/codebase/mmdet/models/detectors/base.py", line 26, in __forward_impl
    return self.simple_test(img, img_metas, **kwargs)
  File "/root/mmdeploy/mmdeploy/core/rewriters/rewriter_utils.py", line 379, in wrapper
    return self.func(self, *args, **kwargs)
  File "/root/mmdeploy/mmdeploy/codebase/mmdet/models/detectors/two_stage.py", line 56, in two_stage_detector__simple_test
    x = self.extract_feat(img)
  File "/root/mmdeploy/mmdeploy/core/rewriters/rewriter_utils.py", line 379, in wrapper
    return self.func(self, *args, **kwargs)
  File "/root/mmdeploy/mmdeploy/core/optimizers/function_marker.py", line 261, in g
    rets = f(*args, **kwargs)
  File "/root/mmdeploy/mmdeploy/codebase/mmdet/models/detectors/two_stage.py", line 23, in two_stage_detector__extract_feat
    return ctx.origin_func(self, img)
  File "/code3/mmdet/models/detectors/two_stage.py", line 67, in extract_feat
    x = self.backbone(img)
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1098, in _slow_forward
    result = self.forward(*input, **kwargs)
  File "/code3/mmdet/models/backbones/swin.py", line 754, in forward
    x, hw_shape, out, out_hw_shape = stage(x, hw_shape)
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1098, in _slow_forward
    result = self.forward(*input, **kwargs)
  File "/code3/mmdet/models/backbones/swin.py", line 460, in forward
    x_down, down_hw_shape = self.downsample(x, hw_shape)
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1098, in _slow_forward
    result = self.forward(*input, **kwargs)
  File "/root/mmdeploy/mmdeploy/core/rewriters/rewriter_utils.py", line 379, in wrapper
    return self.func(self, *args, **kwargs)
  File "/root/mmdeploy/mmdeploy/codebase/mmdet/models/transformer.py", line 28, in patch_merging__forward__tensorrt
    assert H % 2 == 0 and W % 2 == 0, f'x size ({H}*{W}) are not even.'
AssertionError: x size (100*167) are not even.
2022-08-18 07:16:34,130 - mmdeploy - ERROR - `mmdeploy.apis.pytorch2onnx.torch2onnx` with Call id: 0 failed. exit.
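
For what it's worth, the odd size seems to follow directly from the static input shape. A rough back-of-envelope check (assuming the exporter feeds the 800x1333 shape from detection_tensorrt_static-800x1333.py straight into the backbone, with the patch embedding stride of 4 and one 2x downsample per stage) reproduces the 100x167 in the assertion:

import math

# Hypothetical sanity check, not mmdeploy code: track the feature-map size that
# reaches each PatchMerging layer for a static 800x1333 input.
h, w = 800, 1333
h, w = math.ceil(h / 4), math.ceil(w / 4)   # PatchEmbed (stride 4, with padding) -> 200 x 334
for i in range(3):                          # three PatchMerging layers for depths [2, 2, 18, 2]
    print(f'PatchMerging {i}: input {h}x{w}, even: {h % 2 == 0 and w % 2 == 0}')
    h, w = h // 2, w // 2

The second PatchMerging receives 100x167, which is exactly what the assertion reports: with a width of 1333 the feature map stops being even after the first downsample, and the TensorRT-specific PatchMerging rewrite requires even H and W.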

Could you please tell me how to fix this problem? Thank you!

Env

ubuntu 18.04
cuda 11.4
TensorRT-8.4

rylynchen · Aug 18, 2022

Did you try Swin Transformer for instance segmentation? If that worked fine for you, you may consider debugging the customized config step by step.
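
If it helps to narrow things down, here is a minimal sketch that isolates the backbone and applies the TensorRT rewrites outside of tools/deploy.py. It assumes the mmdet 2.x and mmdeploy 0.x Python APIs (build_backbone and RewriterContext); names and signatures may differ in other versions, so treat it as a starting point rather than a recipe:

import torch
from mmdet.models import build_backbone
from mmdeploy.core import RewriterContext   # applies backend-specific rewrites

# Build only the Swin backbone from the config above (random weights are fine
# for reproducing a shape problem).
backbone = build_backbone(dict(
    type='SwinTransformer',
    embed_dims=192,
    depths=[2, 2, 18, 2],
    num_heads=[6, 12, 24, 48],
    window_size=7,
    out_indices=(0, 1, 2, 3)))
backbone.eval()

img = torch.randn(1, 3, 800, 1333)  # the static shape used by the deploy config

# Plain PyTorch PatchMerging pads odd feature maps, so this forward should pass.
with torch.no_grad():
    print([f.shape for f in backbone(img)])

# Under the TensorRT rewrites (what deploy.py uses during ONNX export), the same
# forward should hit the "x size (100*167) are not even" assertion.
with RewriterContext(cfg={}, backend='tensorrt'), torch.no_grad():
    backbone(img)

If the plain forward passes but the rewritten one fails with the same assertion, the issue is the static input resolution interacting with PatchMerging rather than anything else in the customized config.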

AllentDan · Aug 18, 2022

Closing due to long inactivity; feel free to reopen if this is still an issue for you.

AllentDan · Sep 29, 2022