pytorch2keras
pytorch2keras copied to clipboard
padding bug
Describe the bug: I get an error when I convolve with padding != 0.
To Reproduce: this is my model:
class Generator(nn.Module):
    """Encoder-decoder generator: three convolutions down, two transposed
    convolutions up, ending in a tanh-squashed single-channel image.

    Input:  (N, 1, H, W) float tensor.
    Output: (N, 1, H', W') float tensor with values in [-1, 1].

    NOTE: conv1 uses kernel_size=7 with padding=1 (not 3), so the spatial
    size shrinks by 4 on the first layer; the output is therefore smaller
    than the input even though every other layer is shape-symmetric.
    """

    def __init__(self):
        super(Generator, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=1, padding=1, bias=True)
        self.in1 = nn.InstanceNorm2d(64, affine=True)
        self.relu = nn.ReLU(inplace=True)
        # BUG FIX: the original line was missing its closing parenthesis,
        # which made the whole class definition a SyntaxError.
        self.conv2 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1, bias=True)
        self.in2 = nn.InstanceNorm2d(128, affine=True)
        self.conv3 = nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1, bias=True)
        self.in3 = nn.InstanceNorm2d(256, affine=True)
        # Transposed convs mirror conv2/conv3 (stride 2, pad 1) and restore
        # the spatial resolution lost by the two strided downsamples.
        self.deconv1 = nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=True)
        self.in4 = nn.InstanceNorm2d(128, affine=True)
        self.deconv2 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1, bias=True)
        self.in5 = nn.InstanceNorm2d(64, affine=True)
        self.conv4 = nn.Conv2d(64, 1, kernel_size=7, stride=1, padding=3, bias=True)

    def forward(self, x):
        """Run the encoder-decoder pipeline; every block is conv -> IN -> ReLU
        except the final conv, which feeds tanh directly."""
        out = self.conv1(x)
        out = self.in1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.in2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.in3(out)
        out = self.relu(out)
        out = self.deconv1(out)
        out = self.in4(out)
        out = self.relu(out)
        out = self.deconv2(out)
        out = self.in5(out)
        out = self.relu(out)
        out = self.conv4(out)
        out = torch.tanh(out)
        return out
Logs INFO:pytorch2keras:Converter is called. WARNING:pytorch2keras:Custom shapes isn't supported now. DEBUG:pytorch2keras:Input_names: DEBUG:pytorch2keras:['input_0'] DEBUG:pytorch2keras:Output_names: DEBUG:pytorch2keras:['output_0'] Exported graph: graph(%input_0 : Float(1, 1, 64, 64, strides=[4096, 4096, 64, 1], requires_grad=0, device=cpu), %deconv1.weight : Float(256, 128, 5, 5, strides=[3200, 25, 5, 1], requires_grad=1, device=cpu), %deconv1.bias : Float(128, strides=[1], requires_grad=1, device=cpu), %bn4.weight : Float(128, strides=[1], requires_grad=1, device=cpu), %bn4.bias : Float(128, strides=[1], requires_grad=1, device=cpu), %bn4.running_mean : Float(128, strides=[1], requires_grad=0, device=cpu), %bn4.running_var : Float(128, strides=[1], requires_grad=0, device=cpu), %deconv2.weight : Float(128, 64, 5, 5, strides=[1600, 25, 5, 1], requires_grad=1, device=cpu), %deconv2.bias : Float(64, strides=[1], requires_grad=1, device=cpu), %bn5.weight : Float(64, strides=[1], requires_grad=1, device=cpu), %bn5.bias : Float(64, strides=[1], requires_grad=1, device=cpu), %bn5.running_mean : Float(64, strides=[1], requires_grad=0, device=cpu), %bn5.running_var : Float(64, strides=[1], requires_grad=0, device=cpu), %deconv3.weight : Float(64, 1, 5, 5, strides=[25, 25, 5, 1], requires_grad=1, device=cpu), %deconv3.bias : Float(1, strides=[1], requires_grad=1, device=cpu), %onnx::Conv_56 : Float(64, 1, 5, 5, strides=[25, 25, 5, 1], requires_grad=0, device=cpu), %onnx::Conv_57 : Float(64, strides=[1], requires_grad=0, device=cpu), %onnx::Conv_59 : Float(128, 64, 5, 5, strides=[1600, 25, 5, 1], requires_grad=0, device=cpu), %onnx::Conv_60 : Float(128, strides=[1], requires_grad=0, device=cpu), %onnx::Conv_62 : Float(256, 128, 5, 5, strides=[3200, 25, 5, 1], requires_grad=0, device=cpu), %onnx::Conv_63 : Float(256, strides=[1], requires_grad=0, device=cpu)): %/conv1/Conv_output_0 : Float(1, 64, 64, 64, strides=[262144, 4096, 64, 1], requires_grad=1, device=cpu) = 
onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/conv1/Conv"](%input_0, %onnx::Conv_56, %onnx::Conv_57), scope: pt.mymodel.Generator::/torch.nn.modules.conv.Conv2d::conv1 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0 %/relu/Relu_output_0 : Float(1, 64, 64, 64, strides=[262144, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Reluonnx_name="/relu/Relu", scope: pt.mymodel.Generator::/torch.nn.modules.activation.ReLU::relu # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:1453:0 %/conv2/Conv_output_0 : Float(1, 128, 64, 64, strides=[524288, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/conv2/Conv"](%/relu/Relu_output_0, %onnx::Conv_59, %onnx::Conv_60), scope: pt.mymodel.Generator::/torch.nn.modules.conv.Conv2d::conv2 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0 %/relu_1/Relu_output_0 : Float(1, 128, 64, 64, strides=[524288, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Reluonnx_name="/relu_1/Relu", scope: pt.mymodel.Generator::/torch.nn.modules.activation.ReLU::relu # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:1453:0 %/conv3/Conv_output_0 : Float(1, 256, 64, 64, strides=[1048576, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Conv[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/conv3/Conv"](%/relu_1/Relu_output_0, %onnx::Conv_62, %onnx::Conv_63), scope: pt.mymodel.Generator::/torch.nn.modules.conv.Conv2d::conv3 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:458:0 %/relu_2/Relu_output_0 : Float(1, 256, 64, 64, strides=[1048576, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Reluonnx_name="/relu_2/Relu", scope: 
pt.mymodel.Generator::/torch.nn.modules.activation.ReLU::relu # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:1453:0 %/deconv1/ConvTranspose_output_0 : Float(1, 128, 64, 64, strides=[524288, 4096, 64, 1], requires_grad=0, device=cpu) = onnx::ConvTranspose[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/deconv1/ConvTranspose"](%/relu_2/Relu_output_0, %deconv1.weight, %deconv1.bias), scope: pt.mymodel.Generator::/torch.nn.modules.conv.ConvTranspose2d::deconv1 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:953:0 %/bn4/BatchNormalization_output_0 : Float(1, 128, 64, 64, strides=[524288, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::BatchNormalization[epsilon=1.0000000000000001e-05, momentum=0.90000000000000002, training_mode=0, onnx_name="/bn4/BatchNormalization"](%/deconv1/ConvTranspose_output_0, %bn4.weight, %bn4.bias, %bn4.running_mean, %bn4.running_var), scope: pt.mymodel.Generator::/torch.nn.modules.batchnorm.BatchNorm2d::bn4 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:2435:0 %/relu_3/Relu_output_0 : Float(1, 128, 64, 64, strides=[524288, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Reluonnx_name="/relu_3/Relu", scope: pt.mymodel.Generator::/torch.nn.modules.activation.ReLU::relu # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:1453:0 %/deconv2/ConvTranspose_output_0 : Float(1, 64, 64, 64, strides=[262144, 4096, 64, 1], requires_grad=0, device=cpu) = onnx::ConvTranspose[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/deconv2/ConvTranspose"](%/relu_3/Relu_output_0, %deconv2.weight, %deconv2.bias), scope: pt.mymodel.Generator::/torch.nn.modules.conv.ConvTranspose2d::deconv2 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:953:0 %/bn5/BatchNormalization_output_0 : Float(1, 64, 64, 64, 
strides=[262144, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::BatchNormalization[epsilon=1.0000000000000001e-05, momentum=0.90000000000000002, training_mode=0, onnx_name="/bn5/BatchNormalization"](%/deconv2/ConvTranspose_output_0, %bn5.weight, %bn5.bias, %bn5.running_mean, %bn5.running_var), scope: pt.mymodel.Generator::/torch.nn.modules.batchnorm.BatchNorm2d::bn5 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:2435:0 %/relu_4/Relu_output_0 : Float(1, 64, 64, 64, strides=[262144, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Reluonnx_name="/relu_4/Relu", scope: pt.mymodel.Generator::/torch.nn.modules.activation.ReLU::relu # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/functional.py:1453:0 %/deconv3/ConvTranspose_output_0 : Float(1, 1, 64, 64, strides=[4096, 4096, 64, 1], requires_grad=0, device=cpu) = onnx::ConvTranspose[dilations=[1, 1], group=1, kernel_shape=[5, 5], pads=[2, 2, 2, 2], strides=[1, 1], onnx_name="/deconv3/ConvTranspose"](%/relu_4/Relu_output_0, %deconv3.weight, %deconv3.bias), scope: pt.mymodel.Generator::/torch.nn.modules.conv.ConvTranspose2d::deconv3 # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/conv.py:953:0 %output_0 : Float(1, 1, 64, 64, strides=[4096, 4096, 64, 1], requires_grad=1, device=cpu) = onnx::Tanhonnx_name="/tanh/Tanh", scope: pt.mymodel.Generator::/torch.nn.modules.activation.Tanh::tanh # /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/nn/modules/activation.py:358:0 return (%output_0)
INFO:onnx2keras:Converter is called.
DEBUG:onnx2keras:List input shapes:
DEBUG:onnx2keras:[(1, 64, 64)]
DEBUG:onnx2keras:List inputs:
DEBUG:onnx2keras:Input 0 -> input_0.
DEBUG:onnx2keras:List outputs:
DEBUG:onnx2keras:Output 0 -> output_0.
DEBUG:onnx2keras:Gathering weights to dictionary.
DEBUG:onnx2keras:Found weight deconv1.weight with shape (256, 128, 5, 5).
DEBUG:onnx2keras:Found weight deconv1.bias with shape (128,).
DEBUG:onnx2keras:Found weight bn4.weight with shape (128,).
DEBUG:onnx2keras:Found weight bn4.bias with shape (128,).
DEBUG:onnx2keras:Found weight bn4.running_mean with shape (128,).
DEBUG:onnx2keras:Found weight bn4.running_var with shape (128,).
DEBUG:onnx2keras:Found weight deconv2.weight with shape (128, 64, 5, 5).
DEBUG:onnx2keras:Found weight deconv2.bias with shape (64,).
DEBUG:onnx2keras:Found weight bn5.weight with shape (64,).
DEBUG:onnx2keras:Found weight bn5.bias with shape (64,).
DEBUG:onnx2keras:Found weight bn5.running_mean with shape (64,).
DEBUG:onnx2keras:Found weight bn5.running_var with shape (64,).
DEBUG:onnx2keras:Found weight deconv3.weight with shape (64, 1, 5, 5).
DEBUG:onnx2keras:Found weight deconv3.bias with shape (1,).
DEBUG:onnx2keras:Found weight onnx::Conv_56 with shape (64, 1, 5, 5).
DEBUG:onnx2keras:Found weight onnx::Conv_57 with shape (64,).
DEBUG:onnx2keras:Found weight onnx::Conv_59 with shape (128, 64, 5, 5).
DEBUG:onnx2keras:Found weight onnx::Conv_60 with shape (128,).
DEBUG:onnx2keras:Found weight onnx::Conv_62 with shape (256, 128, 5, 5).
DEBUG:onnx2keras:Found weight onnx::Conv_63 with shape (256,).
DEBUG:onnx2keras:Found input input_0 with shape (1, 64, 64)
DEBUG:onnx2keras:######
DEBUG:onnx2keras:...
DEBUG:onnx2keras:Converting ONNX operation
DEBUG:onnx2keras:type: Conv
DEBUG:onnx2keras:node_name: /conv1/Conv_output_0
DEBUG:onnx2keras:node_params: {'dilations': [1, 1], 'group': 1, 'kernel_shape': [5, 5], 'pads': [2, 2, 2, 2], 'strides': [1, 1], 'change_ordering': False, 'name_policy': None}
DEBUG:onnx2keras:...
DEBUG:onnx2keras:Check if all inputs are available:
DEBUG:onnx2keras:Check input 0 (name input_0).
DEBUG:onnx2keras:Check input 1 (name onnx::Conv_56).
DEBUG:onnx2keras:The input not found in layers / model inputs.
DEBUG:onnx2keras:Found in weights, add as a numpy constant.
DEBUG:onnx2keras:Check input 2 (name onnx::Conv_57).
DEBUG:onnx2keras:The input not found in layers / model inputs.
DEBUG:onnx2keras:Found in weights, add as a numpy constant.
DEBUG:onnx2keras:... found all, continue
DEBUG:onnx2keras:conv:Conv with bias
DEBUG:onnx2keras:conv:2D convolution
DEBUG:onnx2keras:conv:Paddings exist, add ZeroPadding layer
Traceback (most recent call last):
File "
torch.onnx.export(G,input_var,'model.onnx',input_names=['input_0'],output_names=['output_0'],dynamic_axes={'input_0':[0],'output_0':[0]} ) /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/onnx/utils.py:2040: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input input_0 warnings.warn( /home/duyangfan/miniconda3/lib/python3.10/site-packages/torch/onnx/utils.py:2040: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input output_0 warnings.warn(
Try this: https://github.com/gmalivenko/pytorch2keras/issues/147#issuecomment-1387642735