ncnn
ncnn copied to clipboard
PNNX does not produce the desired result
The model was converted as:
7767517
21 20
pnnx.Input pnnx_input_0 0 1 0 #0=(1,3,224,224)f32
pnnx.Attribute bn_0 0 1 1 @running_var=(16)f32 #1=(16)f32
pnnx.Attribute pnnx_unique_1 0 1 2 @running_mean=(16)f32 #2=(16)f32
pnnx.Attribute pnnx_unique_2 0 1 3 @bias=(16)f32 #3=(16)f32
pnnx.Attribute pnnx_unique_3 0 1 4 @weight=(16)f32 #4=(16)f32
nn.Conv2d conv2d_0 1 1 0 5 bias=True dilation=(1,1) groups=1 in_channels=3 kernel_size=(2,2) out_channels=16 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(16)f32 @weight=(16,3,2,2)f32 $input=0 #0=(1,3,224,224)f32 #5=(1,16,223,223)f32
F.batch_norm F.batch_norm_0 5 1 5 2 1 4 3 6 eps=1.000000e-05 $input=5 $running_mean=2 $running_var=1 $weight=4 $bias=3 #5=(1,16,223,223)f32 #2=(16)f32 #1=(16)f32 #4=(16)f32 #3=(16)f32 #6=(1,16,223,223)f32
pnnx.Attribute bn_1 0 1 7 @running_var=(8)f32 #7=(8)f32
pnnx.Attribute pnnx_unique_5 0 1 8 @running_mean=(8)f32 #8=(8)f32
pnnx.Attribute pnnx_unique_6 0 1 9 @bias=(8)f32 #9=(8)f32
pnnx.Attribute pnnx_unique_7 0 1 10 @weight=(8)f32 #10=(8)f32
F.relu F.relu_9 1 1 6 11 $input=6 #6=(1,16,223,223)f32 #11=(1,16,223,223)f32
nn.Conv2d conv2d_1 1 1 11 12 bias=True dilation=(1,1) groups=1 in_channels=16 kernel_size=(4,4) out_channels=8 padding=(2,2) padding_mode=zeros stride=(3,3) @bias=(8)f32 @weight=(8,16,4,4)f32 $input=11 #11=(1,16,223,223)f32 #12=(1,8,75,75)f32
F.batch_norm F.batch_norm_1 5 1 12 8 7 10 9 13 eps=1.000000e-05 $input=12 $running_mean=8 $running_var=7 $weight=10 $bias=9 #12=(1,8,75,75)f32 #8=(8)f32 #7=(8)f32 #10=(8)f32 #9=(8)f32 #13=(1,8,75,75)f32
F.max_pool2d F.max_pool2d_8 1 1 13 14 ceil_mode=False dilation=(1,1) kernel_size=(2,2) padding=(0,0) return_indices=False stride=(2,2) $input=13 #13=(1,8,75,75)f32 #14=(1,8,37,37)f32
nn.Conv2d conv2d_2 1 1 14 15 bias=True dilation=(1,1) groups=1 in_channels=8 kernel_size=(6,6) out_channels=8 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(8)f32 @weight=(8,8,6,6)f32 $input=14 #14=(1,8,37,37)f32 #15=(1,8,32,32)f32
nn.Conv2d conv2d_3 1 1 15 16 bias=True dilation=(1,1) groups=1 in_channels=8 kernel_size=(6,6) out_channels=8 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(8)f32 @weight=(8,8,6,6)f32 $input=15 #15=(1,8,32,32)f32 #16=(1,8,27,27)f32
nn.Conv2d conv2d_4 1 1 16 17 bias=True dilation=(1,1) groups=1 in_channels=8 kernel_size=(6,6) out_channels=8 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(8)f32 @weight=(8,8,6,6)f32 $input=16 #16=(1,8,27,27)f32 #17=(1,8,22,22)f32
nn.Conv2d conv2d_5 1 1 17 18 bias=True dilation=(1,1) groups=1 in_channels=8 kernel_size=(6,6) out_channels=8 padding=(0,0) padding_mode=zeros stride=(1,1) @bias=(8)f32 @weight=(8,8,6,6)f32 $input=17 #17=(1,8,22,22)f32 #18=(1,8,17,17)f32
F.upsample F.upsample_10 1 1 18 19 align_corners=False mode=bilinear scale_factor=(2.000000e+00,2.000000e+00) $input=18 #18=(1,8,17,17)f32 #19=(1,8,34,34)f32
pnnx.Output pnnx_output_0 1 0 19 #19=(1,8,34,34)f32
which is obviously not what we expected.
I don't know where it goes wrong, but at the very least the batch-norm layers should have been merged into the preceding convolutions. I also don't understand why there are several `pnnx.Attribute` operators, or why `F.batch_norm` appears in the graph instead of `nn.BatchNorm2d`.