BEVDet
Exploration on Transformer-based Head
Hi, since there are plenty of models that work with a transformer-based head (BEVFormer, PolarFormer, PETR), I wonder whether you have tried one. I trained a model with a Swin-T backbone, initialized from a pretrained BEVDet-Tiny, and a transformer head similar to the one in Object-DGCNN. However, it does not converge well (it ends up at 1.2 mAP), so I wonder whether you have made any attempts in this direction :) Here is my training config:
_base_ = ['../_base_/datasets/nus-3d.py',
          '../_base_/schedules/cyclic_20e.py',
          '../_base_/default_runtime.py']
# Global
# If point cloud range is changed, the models should also change their point
# cloud range accordingly
point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0]
# For nuScenes we usually do 10-class detection
class_names = [
    'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
    'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'
]
data_config = {
    'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
             'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'],
    'Ncams': 6,
    'input_size': (256, 704),
    'src_size': (900, 1600),
    # Augmentation
    'resize': (-0.06, 0.11),
    'rot': (-5.4, 5.4),
    'flip': True,
    'crop_h': (0.0, 0.0),
    'resize_test': 0.04,
}
# Model
grid_config = {
    'xbound': [-51.2, 51.2, 0.8],
    'ybound': [-51.2, 51.2, 0.8],
    'zbound': [-10.0, 10.0, 20.0],
    'dbound': [1.0, 60.0, 1.0],
}
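# A 102.4 m span at 0.8 m per cell gives a 128x128 BEV grid; zbound collapses
# to a single height bin and dbound yields 59 depth bins for lift-splat.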
voxel_size = [0.1, 0.1, 0.2]
numC_Trans = 64
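# numC_Trans is the channel width of the BEV features produced by the view
# transformer and consumed by the BEV encoder below.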
model = dict(
    type='BEVDet',
    img_backbone=dict(
        type='SwinTransformer',
        pretrained='data/pretrain_models/swin_tiny_patch4_window7_224.pth',
        pretrain_img_size=224,
        embed_dims=96,
        patch_size=4,
        window_size=7,
        mlp_ratio=4,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        strides=(4, 2, 2, 2),
        out_indices=(2, 3),
        qkv_bias=True,
        qk_scale=None,
        patch_norm=True,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.0,
        use_abs_pos_embed=False,
        act_cfg=dict(type='GELU'),
        norm_cfg=dict(type='LN', requires_grad=True),
        pretrain_style='official',
        output_missing_index_as_none=False),
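    # out_indices=(2, 3) selects the Swin-T stages with 384 and 768 channels,
    # which FPN_LSS below fuses into a single 512-channel image feature map.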
    img_neck=dict(
        type='FPN_LSS',
        in_channels=384 + 768,
        out_channels=512,
        extra_upsample=None,
        input_feature_index=(0, 1),
        scale_factor=2),
    img_view_transformer=dict(
        type='ViewTransformerLiftSplatShoot',
        grid_config=grid_config,
        data_config=data_config,
        numC_Trans=numC_Trans),
    img_bev_encoder_backbone=dict(type='ResNetForBEVDet', numC_input=numC_Trans),
    img_bev_encoder_neck=dict(
        type='FPN_LSS',
        in_channels=numC_Trans * 8 + numC_Trans * 2,
        out_channels=256),
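    # The BEV encoder presumably outputs two scales (numC_Trans*2 and
    # numC_Trans*8 channels, 640 in total) that FPN_LSS fuses into the
    # 256-channel BEV map the head consumes.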
    pts_bbox_head=dict(
        type='DGCNN3DHead',
        num_query=300,
        num_classes=10,
        in_channels=256,
        sync_cls_avg_factor=True,
        with_box_refine=True,
        as_two_stage=False,
        # share_conv_channel=256,
        # tasks=[
        #     dict(num_class=10, class_names=class_names),
        # ],
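        # The DGCNN-style decoder below follows Object-DGCNN as implemented in
        # https://github.com/WangYueFt/detr3d; those plugin modules
        # (DGCNN3DHead, NMSFreeCoder, ...) must be registered in this codebase.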
        transformer=dict(
            type='DeformableDetrTransformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=2,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiScaleDeformableAttention', embed_dims=256),
                    feedforward_channels=1024,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='Deformable3DDetrTransformerDecoder',
                num_layers=6,
                return_intermediate=True,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1),
                        dict(
                            type='MultiScaleDeformableAttention',
                            embed_dims=256)
                    ],
                    feedforward_channels=1024,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm')))),
        bbox_coder=dict(
            type='NMSFreeCoder',
            post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
            pc_range=point_cloud_range,
            max_num=300,
            voxel_size=voxel_size,
            num_classes=10),
        positional_encoding=dict(
            type='SinePositionalEncoding',
            num_feats=128,
            normalize=True,
            offset=-0.5),
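        # 2 * num_feats = 256, matching the transformer embed_dims.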
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=2.0),
        loss_bbox=dict(type='L1Loss', loss_weight=0.5),
        loss_iou=dict(type='GIoULoss', loss_weight=0.0)),  # For DETR compatibility.
    # model training and testing settings
    train_cfg=dict(pts=dict(
        grid_size=[1024, 1024, 1],
        voxel_size=voxel_size,
        point_cloud_range=point_cloud_range,
        out_size_factor=8,
        assigner=dict(
            type='HungarianAssigner3D',
            cls_cost=dict(type='FocalLossCost', weight=2.0),
            reg_cost=dict(type='BBox3DL1Cost', weight=0.5),
            # Fake cost, just to stay compatible with the DETR head.
            iou_cost=dict(type='IoUCost', weight=0.0),
            pc_range=point_cloud_range))),
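    # In train_cfg, grid_size / out_size_factor = 1024 / 8 = 128, matching the
    # BEV feature resolution implied by grid_config.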
    test_cfg=dict(pts=dict(
        use_rotate_nms=True,
        nms_across_levels=True,
        nms_pre=1000,
        nms_thr=0.2,
        score_thr=0.05,
        min_bbox_size=0,
        max_num=100)))
# Data
dataset_type = 'NuScenesDataset'
data_root = 'data/nuscenes/'
file_client_args = dict(backend='disk')
train_pipeline = [
    dict(type='LoadMultiViewImageFromFiles_BEVDet', is_train=True,
         data_config=data_config),
    dict(
        type='LoadPointsFromFile',
        dummy=True,
        coord_type='LIDAR',
        load_dim=5,
        use_dim=5,
        file_client_args=file_client_args),
    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
    dict(
        type='GlobalRotScaleTrans',
        rot_range=[-0.3925, 0.3925],
        scale_ratio_range=[0.95, 1.05],
        translation_std=[0, 0, 0],
        update_img2lidar=True),
    dict(
        type='RandomFlip3D',
        sync_2d=False,
        flip_ratio_bev_horizontal=0.5,
        flip_ratio_bev_vertical=0.5,
        update_img2lidar=True),
    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='ObjectNameFilter', classes=class_names),
    dict(type='DefaultFormatBundle3D', class_names=class_names),
    dict(type='Collect3D', keys=['img_inputs', 'gt_bboxes_3d', 'gt_labels_3d'],
         meta_keys=('filename', 'ori_shape', 'img_shape', 'lidar2img',
                    'depth2img', 'cam2img', 'pad_shape',
                    'scale_factor', 'flip', 'pcd_horizontal_flip',
                    'pcd_vertical_flip', 'box_mode_3d', 'box_type_3d',
                    'img_norm_cfg', 'pcd_trans', 'sample_idx',
                    'pcd_scale_factor', 'pcd_rotation', 'pts_filename',
                    'transformation_3d_flow', 'img_info'))
]
test_pipeline = [
    dict(type='LoadMultiViewImageFromFiles_BEVDet', data_config=data_config),
    # load lidar points for --show in test.py only
    dict(
        type='LoadPointsFromFile',
        coord_type='LIDAR',
        load_dim=5,
        use_dim=5,
        file_client_args=file_client_args),
    dict(
        type='MultiScaleFlipAug3D',
        img_scale=(1333, 800),
        pts_scale_ratio=1,
        flip=False,
        transforms=[
            dict(
                type='DefaultFormatBundle3D',
                class_names=class_names,
                with_label=False),
            dict(type='Collect3D', keys=['points', 'img_inputs'])
        ])
]
# construct a pipeline for data and gt loading in show function
# please keep its loading function consistent with test_pipeline (e.g. client)
eval_pipeline = [
    dict(type='LoadMultiViewImageFromFiles_BEVDet', data_config=data_config),
    dict(
        type='DefaultFormatBundle3D',
        class_names=class_names,
        with_label=False),
    dict(type='Collect3D', keys=['img_inputs'])
]
input_modality = dict(
    use_lidar=False,
    use_camera=True,
    use_radar=False,
    use_map=False,
    use_external=False)
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type='CBGSDataset',
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file=data_root + 'nuscenes_infos_train.pkl',
            pipeline=train_pipeline,
            classes=class_names,
            test_mode=False,
            use_valid_flag=True,
            modality=input_modality,
            load_interval=2,
            # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
            # and box_type_3d='Depth' in sunrgbd and scannet dataset.
            box_type_3d='LiDAR',
            img_info_prototype='bevdet')),
    val=dict(pipeline=test_pipeline, classes=class_names,
             modality=input_modality, img_info_prototype='bevdet'),
    test=dict(pipeline=test_pipeline, classes=class_names,
              modality=input_modality, img_info_prototype='bevdet'))
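# load_interval=2 trains on every other annotated sample; CBGSDataset then
# class-balance resamples what remains.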
# Optimizer
lr_config = dict(
    policy='cyclic',
    target_ratio=(5, 1e-4),
    cyclic_times=1,
    step_ratio_up=0.4,
)
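# Cyclic schedule: the LR rises to 5x the base LR over the first 40% of
# training, then anneals to 1e-4x the base LR.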
optimizer = dict(
    type='AdamW',
    lr=2e-4,
    paramwise_cfg=dict(
        custom_keys={
            'img_backbone': dict(lr_mult=0.1),
            'img_neck': dict(lr_mult=0.1),
            'img_view_transformer': dict(lr_mult=0.1),
            'img_bev_encoder_backbone': dict(lr_mult=0.1),
            'img_bev_encoder_neck': dict(lr_mult=0.1),
        }),
    weight_decay=0.01)
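# Every module initialized from the BEVDet-Tiny checkpoint trains at 0.1x LR;
# only the new transformer head uses the full 2e-4.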
evaluation = dict(interval=6, pipeline=eval_pipeline)
load_from = '/nfs/chenzehui/code/BEVDet/work_dirs/bevdet-sttiny/epoch_20.pth'
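# Initializes all shared modules from the trained BEVDet-Tiny checkpoint
# mentioned above; the DGCNN head starts from scratch.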
checkpoint_config = dict(interval=6)
total_epochs = 12
runner = dict(type='EpochBasedRunner', max_epochs=total_epochs)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
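As a quick sanity check before launching training, the model can be built straight from the config. A minimal sketch, assuming the mmdetection3d 0.x API that BEVDet is built on; the config path is hypothetical, and the detr3d plugin modules (DGCNN3DHead etc.) must already be registered:

from mmcv import Config
from mmdet3d.models import build_detector

cfg = Config.fromfile('configs/bevdet/bevdet-sttiny-dgcnn.py')  # hypothetical path
model = build_detector(cfg.model)
model.init_weights()
print(type(model.pts_bbox_head).__name__)          # expect: DGCNN3DHead
print(sum(p.numel() for p in model.parameters()))  # parameter-count sanity check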
@zehuichen123 I haven't tried this. Which codebase did you refer to for the Object-DGCNN head?
@HuangJunJie2017 This one: https://github.com/WangYueFt/detr3d.