Error when converting a LightGlue model
error log | 日志或报错信息 | ログ
Export log
C:\Users\SalleeLaptop\Documents\ProgramProject\python\LightGlue-ONNX\lightglue_dynamo\models\superpoint.py:161: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.
one = torch.tensor(1) # Always constant, safe to ignore warning.
pnnxparam = weights\ncnn\gim_lightglue.pnnx.param
pnnxbin = weights\ncnn\gim_lightglue.pnnx.bin
pnnxpy = weights\ncnn\gim_lightglue_pnnx.py
pnnxonnx = weights\ncnn\gim_lightglue.pnnx.onnx
ncnnparam = weights\ncnn\gim_lightglue.ncnn.param
ncnnbin = weights\ncnn\gim_lightglue.ncnn.bin
ncnnpy = weights\ncnn\gim_lightglue_ncnn.py
fp16 = 1
optlevel = 2
device = cpu
inputshape = [2,1,256,256]f32
inputshape2 = [4,1,512,512]f32
customop =
moduleop =
get inputshape from traced inputs
inputshape = [2,1,256,256]f32
############# pass_level0
inline module = lightglue_dynamo.models.lightglue.CrossBlock
inline module = lightglue_dynamo.models.lightglue.LearnableFourierPositionalEncoding
inline module = lightglue_dynamo.models.lightglue.LightGlue
inline module = lightglue_dynamo.models.lightglue.MatchAssignment
inline module = lightglue_dynamo.models.lightglue.SelfBlock
inline module = lightglue_dynamo.models.lightglue.TransformerLayer
inline module = lightglue_dynamo.models.superpoint.SuperPoint
inline module = torch.nn.modules.linear.Identity
inline module = lightglue_dynamo.models.lightglue.CrossBlock
inline module = lightglue_dynamo.models.lightglue.LearnableFourierPositionalEncoding
inline module = lightglue_dynamo.models.lightglue.LightGlue
inline module = lightglue_dynamo.models.lightglue.MatchAssignment
inline module = lightglue_dynamo.models.lightglue.SelfBlock
inline module = lightglue_dynamo.models.lightglue.TransformerLayer
inline module = lightglue_dynamo.models.superpoint.SuperPoint
inline module = torch.nn.modules.linear.Identity
----------------
assign dynamic shape info
############# pass_level1
############# pass_level2
############# pass_level3
############# pass_level4
############# pass_level5
############# pass_ncnn
BinaryOp floor_divide not supported yet
BinaryOp remainder not supported yet
BinaryOp remainder not supported yet
BinaryOp floor_divide not supported yet
BinaryOp remainder not supported yet
BinaryOp floor_divide not supported yet
BinaryOp remainder not supported yet
BinaryOp floor_divide not supported yet
BinaryOp remainder not supported yet
BinaryOp floor_divide not supported yet
BinaryOp remainder not supported yet
BinaryOp floor_divide not supported yet
BinaryOp remainder not supported yet
BinaryOp floor_divide not supported yet
BinaryOp remainder not supported yet
BinaryOp floor_divide not supported yet
BinaryOp remainder not supported yet
BinaryOp floor_divide not supported yet
BinaryOp remainder not supported yet
BinaryOp floor_divide not supported yet
force batch axis 233 for operand 76
force batch axis 233 for operand 77
force batch axis 233 for operand 84
force batch axis 233 for operand 91
force batch axis 233 for operand 134
force batch axis 233 for operand 187
force batch axis 233 for operand 240
force batch axis 233 for operand 293
force batch axis 233 for operand 346
force batch axis 233 for operand 399
force batch axis 233 for operand 452
force batch axis 233 for operand 505
force batch axis 233 for operand 558
force batch axis 233 for operand 595
force batch axis 233 for operand 604
force batch axis 233 for operand 605
force batch axis 233 for operand 607
force batch axis 233 for operand 612
force batch axis 233 for operand pnnx_expr_1078_remainder(134,2)
force batch axis 233 for operand pnnx_expr_1078_sub(1,remainder(134,2))
force batch axis 233 for operand pnnx_expr_1078_floor_divide(134,2)
force batch axis 233 for operand pnnx_expr_1078_mul(floor_divide(134,2),2)
force batch axis 233 for operand pnnx_expr_1078_add(mul(floor_divide(134,2),2),sub(1,remainder(134,2)))
force batch axis 233 for operand pnnx_expr_957_remainder(187,2)
force batch axis 233 for operand pnnx_expr_957_sub(1,remainder(187,2))
force batch axis 233 for operand pnnx_expr_957_floor_divide(187,2)
force batch axis 233 for operand pnnx_expr_957_mul(floor_divide(187,2),2)
force batch axis 233 for operand pnnx_expr_957_add(mul(floor_divide(187,2),2),sub(1,remainder(187,2)))
force batch axis 233 for operand pnnx_expr_836_remainder(240,2)
force batch axis 233 for operand pnnx_expr_836_sub(1,remainder(240,2))
force batch axis 233 for operand pnnx_expr_836_floor_divide(240,2)
force batch axis 233 for operand pnnx_expr_836_mul(floor_divide(240,2),2)
force batch axis 233 for operand pnnx_expr_836_add(mul(floor_divide(240,2),2),sub(1,remainder(240,2)))
force batch axis 233 for operand pnnx_expr_715_remainder(293,2)
force batch axis 233 for operand pnnx_expr_715_sub(1,remainder(293,2))
force batch axis 233 for operand pnnx_expr_715_floor_divide(293,2)
force batch axis 233 for operand pnnx_expr_715_mul(floor_divide(293,2),2)
force batch axis 233 for operand pnnx_expr_715_add(mul(floor_divide(293,2),2),sub(1,remainder(293,2)))
force batch axis 233 for operand pnnx_expr_594_remainder(346,2)
force batch axis 233 for operand pnnx_expr_594_sub(1,remainder(346,2))
force batch axis 233 for operand pnnx_expr_594_floor_divide(346,2)
force batch axis 233 for operand pnnx_expr_594_mul(floor_divide(346,2),2)
force batch axis 233 for operand pnnx_expr_594_add(mul(floor_divide(346,2),2),sub(1,remainder(346,2)))
force batch axis 233 for operand pnnx_expr_473_remainder(399,2)
force batch axis 233 for operand pnnx_expr_473_sub(1,remainder(399,2))
force batch axis 233 for operand pnnx_expr_473_floor_divide(399,2)
force batch axis 233 for operand pnnx_expr_473_mul(floor_divide(399,2),2)
force batch axis 233 for operand pnnx_expr_473_add(mul(floor_divide(399,2),2),sub(1,remainder(399,2)))
force batch axis 233 for operand pnnx_expr_352_remainder(452,2)
force batch axis 233 for operand pnnx_expr_352_sub(1,remainder(452,2))
force batch axis 233 for operand pnnx_expr_352_floor_divide(452,2)
force batch axis 233 for operand pnnx_expr_352_mul(floor_divide(452,2),2)
force batch axis 233 for operand pnnx_expr_352_add(mul(floor_divide(452,2),2),sub(1,remainder(452,2)))
force batch axis 233 for operand pnnx_expr_231_remainder(505,2)
force batch axis 233 for operand pnnx_expr_231_sub(1,remainder(505,2))
force batch axis 233 for operand pnnx_expr_231_floor_divide(505,2)
force batch axis 233 for operand pnnx_expr_231_mul(floor_divide(505,2),2)
force batch axis 233 for operand pnnx_expr_231_add(mul(floor_divide(505,2),2),sub(1,remainder(505,2)))
force batch axis 233 for operand pnnx_expr_110_remainder(558,2)
force batch axis 233 for operand pnnx_expr_110_sub(1,remainder(558,2))
force batch axis 233 for operand pnnx_expr_110_floor_divide(558,2)
force batch axis 233 for operand pnnx_expr_110_mul(floor_divide(558,2),2)
force batch axis 233 for operand pnnx_expr_110_add(mul(floor_divide(558,2),2),sub(1,remainder(558,2)))
fallback batch axis 233 for operand 30
fallback batch axis 233 for operand 31
fallback batch axis 233 for operand 106
fallback batch axis 233 for operand 108
fallback batch axis 233 for operand 107
fallback batch axis 233 for operand 110
fallback batch axis 233 for operand 117
fallback batch axis 233 for operand 118
fallback batch axis 233 for operand 119
fallback batch axis 233 for operand 120
fallback batch axis 233 for operand 141
fallback batch axis 233 for operand 142
fallback batch axis 233 for operand 143
fallback batch axis 233 for operand 144
fallback batch axis 233 for operand 159
fallback batch axis 233 for operand 161
fallback batch axis 233 for operand 160
fallback batch axis 233 for operand 163
fallback batch axis 233 for operand 170
fallback batch axis 233 for operand 171
fallback batch axis 233 for operand 172
fallback batch axis 233 for operand 173
fallback batch axis 233 for operand 194
fallback batch axis 233 for operand 195
fallback batch axis 233 for operand 196
fallback batch axis 233 for operand 197
fallback batch axis 233 for operand 212
fallback batch axis 233 for operand 214
fallback batch axis 233 for operand 213
fallback batch axis 233 for operand 216
fallback batch axis 233 for operand 223
fallback batch axis 233 for operand 224
fallback batch axis 233 for operand 225
fallback batch axis 233 for operand 226
fallback batch axis 233 for operand 247
fallback batch axis 233 for operand 248
fallback batch axis 233 for operand 249
fallback batch axis 233 for operand 250
fallback batch axis 233 for operand 265
fallback batch axis 233 for operand 267
fallback batch axis 233 for operand 266
fallback batch axis 233 for operand 269
fallback batch axis 233 for operand 276
fallback batch axis 233 for operand 277
fallback batch axis 233 for operand 278
fallback batch axis 233 for operand 279
fallback batch axis 233 for operand 300
fallback batch axis 233 for operand 301
fallback batch axis 233 for operand 302
fallback batch axis 233 for operand 303
fallback batch axis 233 for operand 318
fallback batch axis 233 for operand 320
fallback batch axis 233 for operand 319
fallback batch axis 233 for operand 322
fallback batch axis 233 for operand 329
fallback batch axis 233 for operand 330
fallback batch axis 233 for operand 331
fallback batch axis 233 for operand 332
fallback batch axis 233 for operand 353
fallback batch axis 233 for operand 354
fallback batch axis 233 for operand 355
fallback batch axis 233 for operand 356
fallback batch axis 233 for operand 371
fallback batch axis 233 for operand 373
fallback batch axis 233 for operand 372
fallback batch axis 233 for operand 375
fallback batch axis 233 for operand 382
fallback batch axis 233 for operand 383
fallback batch axis 233 for operand 384
fallback batch axis 233 for operand 385
fallback batch axis 233 for operand 406
fallback batch axis 233 for operand 407
fallback batch axis 233 for operand 408
fallback batch axis 233 for operand 409
fallback batch axis 233 for operand 424
fallback batch axis 233 for operand 426
fallback batch axis 233 for operand 425
fallback batch axis 233 for operand 428
fallback batch axis 233 for operand 435
fallback batch axis 233 for operand 436
fallback batch axis 233 for operand 437
fallback batch axis 233 for operand 438
fallback batch axis 233 for operand 459
fallback batch axis 233 for operand 460
fallback batch axis 233 for operand 461
fallback batch axis 233 for operand 462
fallback batch axis 233 for operand 477
fallback batch axis 233 for operand 479
fallback batch axis 233 for operand 478
fallback batch axis 233 for operand 481
fallback batch axis 233 for operand 488
fallback batch axis 233 for operand 489
fallback batch axis 233 for operand 490
fallback batch axis 233 for operand 491
fallback batch axis 233 for operand 512
fallback batch axis 233 for operand 513
fallback batch axis 233 for operand 514
fallback batch axis 233 for operand 515
fallback batch axis 233 for operand 530
fallback batch axis 233 for operand 532
fallback batch axis 233 for operand 531
fallback batch axis 233 for operand 534
fallback batch axis 233 for operand 541
fallback batch axis 233 for operand 542
fallback batch axis 233 for operand 543
fallback batch axis 233 for operand 544
fallback batch axis 233 for operand 565
fallback batch axis 233 for operand 566
fallback batch axis 233 for operand 567
fallback batch axis 233 for operand 568
fallback batch axis 233 for operand pnnx_expr_1140_neg(108)
fallback batch axis 233 for operand pnnx_expr_1019_neg(161)
fallback batch axis 233 for operand pnnx_expr_898_neg(214)
fallback batch axis 233 for operand pnnx_expr_777_neg(267)
fallback batch axis 233 for operand pnnx_expr_656_neg(320)
fallback batch axis 233 for operand pnnx_expr_535_neg(373)
fallback batch axis 233 for operand pnnx_expr_414_neg(426)
fallback batch axis 233 for operand pnnx_expr_293_neg(479)
fallback batch axis 233 for operand pnnx_expr_172_neg(532)
binaryop broadcast across batch axis 0 and 233 is not supported
binaryop broadcast across batch axis 0 and 233 is not supported
binaryop broadcast across batch axis 0 and 233 is not supported
binaryop broadcast across batch axis 0 and 233 is not supported
stack along batch axis 0 is not supported
stack along batch axis 0 is not supported
unbind along batch axis 0 is not supported
reshape expression refer to batch axis 0 is not supported
convert reshape expression [size(@0,0),8,8,size(@1,2),size(@2,3)] => 0w,0h,8,8,size(@0,0)
reshape expression refer to batch axis 0 is not supported
convert reshape expression [size(@0,0),mul(mul(mul(size(@1,2),8),size(@2,3)),8)] => *(*(*(1h,8),1w),8)
unsupported normalize for 2-rank tensor with axis 0
permute 5-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
reshape to 6-rank tensor is not supported yet!
ignore pnnx.Expression pnnx_expr_1339 param expr=size(@0,2)
ignore pnnx.Expression pnnx_expr_1337 param expr=size(@0,3)
ignore pnnx.Expression pnnx_expr_1324 param expr=6
ignore pnnx.Expression pnnx_expr_1323 param expr=11
ignore torch.zeros_like torch.zeros_like_115 param dtype=None
ignore pnnx.Expression pnnx_expr_1294 param expr=False
ignore pnnx.Expression pnnx_expr_1289 param expr=or(@0,and(@1,@2))
ignore pnnx.Expression pnnx_expr_1281 param expr=or(@0,and(@1,@2))
ignore Tensor.fill Tensor.fill_360 param value=-1.000000e+00
ignore Tensor.fill Tensor.fill_361 param value=-1.000000e+00
ignore Tensor.fill Tensor.fill_362 param value=-1.000000e+00
ignore Tensor.fill Tensor.fill_363 param value=-1.000000e+00
ignore torch.topk torch.topk_123 param dim=-1
ignore torch.topk torch.topk_123 param k=1024
ignore torch.topk torch.topk_123 param largest=True
ignore torch.topk torch.topk_123 param sorted=True
ignore torch.flip torch.flip_157 param dims=(2)
ignore Concat stack_0 param dim=0
ignore Concat stack_1 param dim=0
ignore torch.repeat_interleave torch.repeat_interleave_158 param dim=3
ignore torch.repeat_interleave torch.repeat_interleave_158 param repeats=2
ignore Slice unbind_1 param dim=0
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_603 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_603 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_603 param is_causal=False
ignore pnnx.Expression pnnx_expr_1086 param expr=size(@0,0)
ignore torch.arange torch.arange_105 param dtype=None
ignore pnnx.Expression pnnx_expr_1077 param expr=[@0]
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_604 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_604 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_604 param is_causal=False
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_605 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_605 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_605 param is_causal=False
ignore pnnx.Expression pnnx_expr_965 param expr=size(@0,0)
ignore torch.arange torch.arange_106 param dtype=None
ignore pnnx.Expression pnnx_expr_956 param expr=[@0]
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_606 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_606 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_606 param is_causal=False
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_607 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_607 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_607 param is_causal=False
ignore pnnx.Expression pnnx_expr_844 param expr=size(@0,0)
ignore torch.arange torch.arange_107 param dtype=None
ignore pnnx.Expression pnnx_expr_835 param expr=[@0]
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_608 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_608 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_608 param is_causal=False
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_609 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_609 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_609 param is_causal=False
ignore pnnx.Expression pnnx_expr_723 param expr=size(@0,0)
ignore torch.arange torch.arange_108 param dtype=None
ignore pnnx.Expression pnnx_expr_714 param expr=[@0]
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_610 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_610 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_610 param is_causal=False
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_611 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_611 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_611 param is_causal=False
ignore pnnx.Expression pnnx_expr_602 param expr=size(@0,0)
ignore torch.arange torch.arange_109 param dtype=None
ignore pnnx.Expression pnnx_expr_593 param expr=[@0]
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_612 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_612 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_612 param is_causal=False
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_613 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_613 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_613 param is_causal=False
ignore pnnx.Expression pnnx_expr_481 param expr=size(@0,0)
ignore torch.arange torch.arange_110 param dtype=None
ignore pnnx.Expression pnnx_expr_472 param expr=[@0]
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_614 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_614 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_614 param is_causal=False
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_615 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_615 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_615 param is_causal=False
ignore pnnx.Expression pnnx_expr_360 param expr=size(@0,0)
ignore torch.arange torch.arange_111 param dtype=None
ignore pnnx.Expression pnnx_expr_351 param expr=[@0]
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_616 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_616 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_616 param is_causal=False
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_617 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_617 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_617 param is_causal=False
ignore pnnx.Expression pnnx_expr_239 param expr=size(@0,0)
ignore torch.arange torch.arange_112 param dtype=None
ignore pnnx.Expression pnnx_expr_230 param expr=[@0]
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_618 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_618 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_618 param is_causal=False
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_619 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_619 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_619 param is_causal=False
ignore pnnx.Expression pnnx_expr_118 param expr=size(@0,0)
ignore torch.arange torch.arange_113 param dtype=None
ignore pnnx.Expression pnnx_expr_109 param expr=[@0]
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_620 param attn_mask=None
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_620 param dropout_p=0.000000e+00
ignore F.scaled_dot_product_attention F.scaled_dot_product_attention_620 param is_causal=False
ignore torch.topk torch.topk_124 param dim=2
ignore torch.topk torch.topk_124 param k=1
ignore torch.topk torch.topk_124 param largest=True
ignore torch.topk torch.topk_124 param sorted=False
ignore torch.topk torch.topk_125 param dim=1
ignore torch.topk torch.topk_125 param k=1
ignore torch.topk torch.topk_125 param largest=True
ignore torch.topk torch.topk_125 param sorted=False
ignore torch.gather torch.gather_583 param dim=1
ignore torch.gt torch.gt_120 param other=1.000000e-01
ignore pnnx.Expression pnnx_expr_15 param expr=and(@0,@1)
ignore pnnx.Expression pnnx_expr_14 param expr=[@0,@1]
Value(False)
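A note on the BinaryOp floor_divide / remainder failures above: judging from the neighboring torch.arange ops, the repeated expression add(mul(floor_divide(i,2),2), sub(1,remainder(i,2))) looks like the adjacent-pair index swap used by LightGlue's rotary positional encoding (this reading is inferred from the log, not confirmed). A minimal runnable sketch of the pattern, plus one hypothetical rewrite that avoids both unsupported BinaryOps; the reshape/flip variant may of course run into other unsupported ops, so this is only a sketch:

import torch

i = torch.arange(8)  # index tensor, as produced by the torch.arange_10x ops above
# Expression from the log: add(mul(floor_divide(i,2),2), sub(1,remainder(i,2)))
swapped = i // 2 * 2 + (1 - i % 2)  # tensor([1, 0, 3, 2, 5, 4, 7, 6])
# Hypothetical rewrite without floor_divide/remainder: group into pairs, flip each pair
swapped2 = i.reshape(-1, 2).flip(1).reshape(-1)
assert torch.equal(swapped, swapped2)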
Error message
Exception occurred: SyntaxError
invalid syntax (gim_lightglue_pnnx.py, line 445)
File "C:\Users\SalleeLaptop\Documents\ProgramProject\python\LightGlue-ONNX\dynamo.py", line 234, in export_pnnx
pnnx.export(pipeline,str(output),dummy_input,dummy_input2)
File "C:\Users\SalleeLaptop\Documents\ProgramProject\python\LightGlue-ONNX\dynamo.py", line 430, in <module>
app()
SyntaxError: invalid syntax (gim_lightglue_pnnx.py, line 445)
model | 模型 | モデル
- superpoint+lightglue
how to reproduce | 复现步骤 | 再現方法
- Clone the LightGlue-ONNX project, set up a virtual environment, and install pnnx
- Prepare the model weights file and place it under weights/
- Add a new command to dynamo.py:
@app.command()
def export_pnnx(
    extractor_type: Annotated[Extractor, typer.Argument()] = Extractor.superpoint,
    weight_path: Annotated[
        Optional[Path],
        typer.Option("--weights", dir_okay=False, readable=True, help="Path to weights file."),
    ] = None,
    namespace_extractor: Annotated[
        str,
        typer.Option("--name-extractor", help="Namespace for the extractor model. Default is extractor."),
    ] = "extractor",
    namespace_matcher: Annotated[
        str,
        typer.Option("--name-matcher", help="Namespace for the matcher model. Default is matcher."),
    ] = "matcher",
    output: Annotated[
        Optional[Path],
        typer.Option("-o", "--output", dir_okay=False, writable=True, help="Path to save exported model."),
    ] = None,
    batch_size: Annotated[
        int,
        typer.Option("-b", "--batch-size", min=0, help="Batch size of exported model. Set to 0 to mark as dynamic."),
    ] = 0,
    height: Annotated[
        int, typer.Option("-h", "--height", min=0, help="Height of input image. Set to 0 to mark as dynamic.")
    ] = 0,
    width: Annotated[
        int, typer.Option("-w", "--width", min=0, help="Width of input image. Set to 0 to mark as dynamic.")
    ] = 0,
    num_keypoints: Annotated[
        int, typer.Option(min=128, help="Number of keypoints outputted by feature extractor.")
    ] = 1024,
    fp16: Annotated[bool, typer.Option("--fp16", help="Whether to also convert to FP16.")] = False,
):
    """Export LightGlue to PNNX (NCNN) format."""
    import pnnx
    import torch

    from lightglue_dynamo.models import DISK, LightGlue, Pipeline, SuperPoint

    # 1. Build the model
    match extractor_type:
        case Extractor.superpoint:
            extractor = SuperPoint(num_keypoints=num_keypoints)
        case Extractor.disk:
            extractor = DISK(num_keypoints=num_keypoints)
    matcher = LightGlue(**extractor_type.lightglue_config)
    pipeline = Pipeline(extractor, matcher).eval()

    # 2. Load the weights, remapping the checkpoint namespaces to extractor/matcher
    if weight_path is not None:
        weight_dict = torch.load(weight_path)["state_dict"]
        weight_keys = list(weight_dict.keys())
        for weight_key in weight_keys:
            if weight_key.startswith(namespace_extractor):
                weight_dict[weight_key.replace(namespace_extractor, "extractor")] = weight_dict[weight_key]
                del weight_dict[weight_key]
            if weight_key.startswith(namespace_matcher):
                weight_dict[weight_key.replace(namespace_matcher, "matcher")] = weight_dict[weight_key]
                del weight_dict[weight_key]
        pipeline.load_state_dict(weight_dict, strict=False)

    # 3. Resolve the output path
    if output is None:
        output = Path(f"weights/{extractor_type}_lightglue_pipeline.pt")
    else:
        output = output.with_suffix(".pt")

    # 4. Build the dummy inputs; pnnx traces twice, and any dim that differs
    # between the two inputs is marked as dynamic (matching the inputshape /
    # inputshape2 lines in the log)
    dummy_input = torch.randn(
        batch_size or 2,
        extractor_type.input_channels,
        height or 256,
        width or 256,
    )
    dummy_input2 = torch.randn(
        dummy_input.shape[0] if batch_size != 0 else dummy_input.shape[0] * 2,
        dummy_input.shape[1],
        dummy_input.shape[2] if height != 0 else dummy_input.shape[2] * 2,
        dummy_input.shape[3] if width != 0 else dummy_input.shape[3] * 2,
    )
    # Note: the fp16 flag is not forwarded here; the log shows pnnx's default fp16 = 1
    pnnx.export(pipeline, str(output), dummy_input, dummy_input2)
- Run dynamo.py:
python dynamo.py export-pnnx --weights weights/gim_lightglue_100h.ckpt --num-keypoints 1024 -o weights/ncnn/gim_lightglue.pnnx --fp16
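Running this command produces the export log and the SyntaxError shown above. The failure can also be reproduced standalone by importing the generated file (a minimal sketch; the file name comes from the pnnxpy line in the log):

import importlib.util

spec = importlib.util.spec_from_file_location("gim_lightglue_pnnx", "weights/ncnn/gim_lightglue_pnnx.py")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)  # raises SyntaxError: invalid syntax (line 445)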
Offending code snippet (from the generated gim_lightglue_pnnx.py):
v_33 = torch.zeros_like(input=v_32, dtype=None)
v_34 = F.max_pool2d(input=v_32, ceil_mode=False, dilation=(1,1), kernel_size=(9,9), padding=(4,4), return_indices=False, stride=(1,1))
v_35 = torch.eq(input=v_32, other=v_34)
v_36 = False
v_37 = aten::to(v_35, v_3, v_36, v_36)  # line 445
v_38 = F.max_pool2d(input=v_37, ceil_mode=False, dilation=(1,1), kernel_size=(9,9), padding=(4,4), return_indices=False, stride=(1,1))
v_39 = aten::to(v_38, v_4, v_36, v_36)
v_40 = torch.where(condition=v_39, input=v_33, other=v_32)
v_41 = F.max_pool2d(input=v_40, ceil_mode=False, dilation=(1,1), kernel_size=(9,9), padding=(4,4), return_indices=False, stride=(1,1))
v_42 = torch.eq(input=v_40, other=v_41)
v_43 = torch.bitwise_not(input=v_39)
v_44 = (v_35 | (v_42 & v_43))
v_45 = aten::to(v_44, v_3, v_36, v_36)
v_46 = F.max_pool2d(input=v_45, ceil_mode=False, dilation=(1,1), kernel_size=(9,9), padding=(4,4), return_indices=False, stride=(1,1))
v_47 = aten::to(v_46, v_4, v_36, v_36)
v_48 = torch.where(condition=v_47, input=v_33, other=v_32)
v_49 = F.max_pool2d(input=v_48, ceil_mode=False, dilation=(1,1), kernel_size=(9,9), padding=(4,4),
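The aten::to calls are what break the generated file: aten::to is a raw TorchScript operator name that pnnx failed to lower to a Python expression, and the :: token is not valid Python syntax, hence the SyntaxError at line 445. For reference, a hand-written equivalent of the failing line, assuming v_3 is a torch dtype and the two v_36 arguments map to non_blocking and copy (both assumptions read off the snippet, not confirmed):

v_37 = v_35.to(dtype=v_3, non_blocking=v_36, copy=v_36)  # hypothetical valid-Python form of line 445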
Hi, have you managed to solve this? I am getting an error too, although mine is on aten::where.
No, there are too many unsupported operators. I later tried the RKNN route on an RK3588 and that did not work either; there was no way to rewrite around the topK operator. I am now using a Jetson Orin Nano, which works smoothly, but given the project's requirement for domestically produced hardware, I am still waiting for the hardware department to write drivers for the new graphics card.
https://github.com/Tencent/ncnn/pull/6397