nni
After pruning YOLOX with L1NormPruner, the model size only drops from 3797K to 3784K
# YoloBody, YOLOLoss, fit_one_epoch and the hyper-parameters below (num_classes, phi,
# weights, Init_lr_fit, momentum, weight_decay, optimizer_type, num_train,
# epoch_step_val, gen, gen_val, UnFreeze_Epoch) come from the rest of my training script.
import os

import torch
import torch.nn as nn
import torch.optim as optim

import nni
from nni.compression.pytorch.pruning import L1NormPruner
from nni.compression.pytorch import ModelSpeedup

img_size = 416
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = YoloBody(num_classes, phi)
model.load_state_dict(torch.load(weights, map_location=device))
# summary(model, (3, 64, 64), device="cpu")
print('\nThe accuracy with masks:')
model_train = model.train()
yolo_loss = YOLOLoss(3)

#---------------------------------------#
#   Choose the optimizer according to optimizer_type
#---------------------------------------#
pg0, pg1, pg2 = [], [], []
for k, v in model.named_modules():
    if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
        pg2.append(v.bias)
    if isinstance(v, nn.BatchNorm2d) or "bn" in k:
        pg0.append(v.weight)
    elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
        pg1.append(v.weight)
optimizer = {
    'adam': optim.Adam(pg0, Init_lr_fit, betas=(momentum, 0.999)),
    'sgd' : optim.SGD(pg0, Init_lr_fit, momentum=momentum, nesterov=True),
}[optimizer_type]
optimizer.add_param_group({"params": pg1, "weight_decay": weight_decay})
optimizer.add_param_group({"params": pg2})

criterion = torch.nn.CrossEntropyLoss()
traced_optimizer = nni.trace(torch.optim.SGD)(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
print(model)
fit_one_epoch(model_train, model, yolo_loss, optimizer, 3, num_train / 2, epoch_step_val, gen, gen_val, UnFreeze_Epoch, True)

config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.5}]
pruner = L1NormPruner(model, config_list)
pruner.compress()
# pruner = SlimPruner(model, config_list, trainer, traced_optimizer, criterion, training_epochs=1, scale=0.0001, mode='global')
pruner.export_model(model_path='pruned_yolov5n-0.5.pth', mask_path='mask_yolov5n-0.5.pth')
pruner._unwrap_model()
ModelSpeedup(model, dummy_input=torch.rand([8, 3, 64, 64]).to("cpu"), masks_file='mask_yolov5n-0.5.pth').speedup_model()
# pruner.show_pruned_weights()
print(model)
fit_one_epoch(model_train, model, yolo_loss, optimizer, 3, num_train / 2, epoch_step_val, gen, gen_val, UnFreeze_Epoch, True)
torch.save(model.state_dict(), os.path.join('logs', "yolo_prune.pth"))
# apply_compression_results(model, 'mask_yolov5n-0.5.pth', "cpu")
# torch.save(model.state_dict(), "use_mask_small_model.pth")
print('\n' + '=' * 50 + ' EVALUATE THE MODEL AFTER SPEEDUP ' + '=' * 50)
The above is my code. After running pruning and speedup, the model size barely shrinks and the Conv2d layers are still there. How can I fix this? Any help would be appreciated.
@maziyi234 Have you run speedup yet?
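For reference, here is a minimal, self-contained sketch of the expected behaviour (TinyNet and n_params are illustrative names, not from the script above): L1NormPruner by itself only produces masks, and the parameter count only drops after ModelSpeedup has rewritten the Conv2d layers.

# Minimal sketch: masking alone does not shrink a model; ModelSpeedup does.
import torch
import torch.nn as nn
from nni.compression.pytorch.pruning import L1NormPruner
from nni.compression.pytorch import ModelSpeedup


class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.head = nn.Conv2d(64, 10, 1)

    def forward(self, x):
        x = torch.relu(self.bn1(self.conv1(x)))
        x = torch.relu(self.bn2(self.conv2(x)))
        return self.head(x)


def n_params(m):
    return sum(p.numel() for p in m.parameters())


model = TinyNet()
print('params before pruning:', n_params(model))

# Prune 50% of the Conv2d filters, but keep the last layer so the output shape stays fixed.
config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.5},
               {'exclude': True, 'op_names': ['head']}]
pruner = L1NormPruner(model, config_list)
_, masks = pruner.compress()
pruner._unwrap_model()
print('params after masking:', n_params(model))   # same count: pruning only produced masks

# ModelSpeedup traces the graph and physically removes the masked channels.
ModelSpeedup(model, dummy_input=torch.rand(1, 3, 64, 64), masks_file=masks).speedup_model()
print('params after speedup:', n_params(model))   # smaller
print(model)                                       # Conv2d layers now report fewer channels

If the printed Conv2d shapes do not change after speedup_model(), the masks were never applied to the traced graph.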
When running speedup, parts of my model are of the func type; how do I unfold them into Module types? The warning I get is: warning 'Cannot replace a reused module with padding operator!!'
Or, what could be causing the model size not to drop after pruning and speedup?
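On the 'Cannot replace a reused module with padding operator' warning: NNI's speedup rewrites sub-modules, and it warns when a single module instance is reused at several call sites in the traced graph. The sketch below is a generic illustration (not taken from YoloBody, and it may not be the only cause in your network) of the reused pattern versus an unfolded one, where functional padding also becomes a module.

# Generic illustration: reused/functional ops vs. one module per call site.
import torch
import torch.nn as nn
import torch.nn.functional as F


class Reused(nn.Module):
    """One shared activation instance plus functional padding: hard to rewrite."""
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, 3)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.act = nn.SiLU()               # the same instance is used twice below

    def forward(self, x):
        x = self.act(self.conv1(F.pad(x, (1, 1, 1, 1))))   # functional padding
        x = self.act(self.conv2(F.pad(x, (1, 1, 1, 1))))
        return x


class Unfolded(nn.Module):
    """Every call site owns its own module, and padding is a module as well."""
    def __init__(self):
        super().__init__()
        self.pad1 = nn.ZeroPad2d(1)
        self.conv1 = nn.Conv2d(3, 16, 3)
        self.act1 = nn.SiLU()
        self.pad2 = nn.ZeroPad2d(1)
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.act2 = nn.SiLU()

    def forward(self, x):
        x = self.act1(self.conv1(self.pad1(x)))
        x = self.act2(self.conv2(self.pad2(x)))
        return x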
hello @maziyi234, could you show us where we can find YoloBody? We will try to reproduce your problem.
class YoloBody(nn.Module):
    def __init__(self, num_classes, phi):
        super().__init__()
        depth_dict = {'nano': 0.33, 'tiny': 0.33, 's': 0.33, 'm': 0.67, 'l': 1.00, 'x': 1.33}
        width_dict = {'nano': 0.25, 'tiny': 0.375, 's': 0.50, 'm': 0.75, 'l': 1.00, 'x': 1.25}
        depth, width = depth_dict[phi], width_dict[phi]
        depthwise = True if phi == 'nano' else False

        self.backbone = YOLOPAFPN(depth, width, depthwise=depthwise)
        self.head = YOLOXHead(num_classes, width, depthwise=depthwise)

    def forward(self, x):
        fpn_outs = self.backbone.forward(x)
        outputs = self.head.forward(fpn_outs)
        return outputs
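For completeness, a reproduction sketch against the class above, assuming YOLOPAFPN and YOLOXHead are importable from the same file; num_classes, phi and the weight path are placeholders, and the 416x416 dummy input mirrors img_size in the script above. Given the warnings discussed earlier, speedup may still fail on this model.

# Reproduction sketch: prune and speed up YoloBody directly.
import torch
from nni.compression.pytorch.pruning import L1NormPruner
from nni.compression.pytorch import ModelSpeedup

model = YoloBody(num_classes=20, phi='s')                                   # placeholder arguments
model.load_state_dict(torch.load('path/to/weights.pth', map_location='cpu'))  # placeholder path

config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.5}]
pruner = L1NormPruner(model, config_list)
_, masks = pruner.compress()
pruner._unwrap_model()

ModelSpeedup(model, dummy_input=torch.rand(1, 3, 416, 416), masks_file=masks).speedup_model()
print(model)   # Conv2d layers should report reduced channel counts if speedup succeeded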
Uploading yolo.txt…
@maziyi234, it seems you commented before your file finished uploading. Please try to upload it again.
Link: https://pan.baidu.com/s/10hpe-dsLqq5xrkOtoYFBTg  Extraction code: ovix
There are some messy issues when speeding up your YOLO model; we will let you know when they are fixed.