pytorch2keras

Layer weight shapes don't match

Open Gasp34 opened this issue 5 years ago • 2 comments

Describe the bug I get the error: Layer weight shape (4, 19, 4, 300) not compatible with provided weight shape (4, 19, 1, 300)

To Reproduce

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class ConvNet(nn.Module):
    def __init__(self, num_classes):
        super(ConvNet, self).__init__()
        
        # for layer one, separate convolution and relu step from maxpool and batch normalization
        # to extract convolutional filters
        self.layer1_conv = nn.Sequential(
            nn.Conv2d(in_channels=1,
                      out_channels=300,
                      kernel_size=(4, 19),
                      stride=1,
                      padding=0),  # padding is done in forward method along 1 dimension only
            nn.ReLU())

        self.layer1_process = nn.Sequential(
            nn.MaxPool2d(kernel_size=(1,3), stride=(1,3), padding=(0,1)),
            nn.BatchNorm2d(300))

        self.layer2 = nn.Sequential(
            nn.Conv2d(in_channels=300,
                      out_channels=200,
                      kernel_size=(1, 11),
                      stride=1,
                      padding=0),  # padding is done in forward method along 1 dimension only
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(1,4), stride=(1,4), padding=(0,1)),
            nn.BatchNorm2d(200))

        self.layer3 = nn.Sequential(
            nn.Conv2d(in_channels=200,
                      out_channels=200,
                      kernel_size=(1, 7),
                      stride=1,
                      padding=0),  # padding is done in forward method along 1 dimension only
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(1, 4), stride=(1,4), padding=(0,1)),
            nn.BatchNorm2d(200))

        self.layer4 = nn.Sequential(
            nn.Linear(in_features=1000,
                      out_features=1000),
            nn.ReLU(),
            nn.Dropout(p=0.03))

        self.layer5 = nn.Sequential(
            nn.Linear(in_features=1000,
                      out_features=1000),
            nn.ReLU(),
            nn.Dropout(p=0.03))

        self.layer6 = nn.Sequential(
                nn.Linear(in_features=1000,
                          out_features=num_classes))#,
                #nn.Sigmoid())


    def forward(self, input):
        # run all layers on input data
        # add dummy dimension to input (for num channels=1)
        print(input.shape)
        input = torch.unsqueeze(input, 1)
        print(input.shape)
        # Run convolutional layers
        input = F.pad(input, (9, 9), mode='constant', value=0) # padding - last dimension goes first
        print(input.shape)
        out = self.layer1_conv(input)
        print(out.shape)
        out = self.layer1_process(out)
        print(out.shape)
        out = F.pad(out, (5, 5), mode='constant', value=0)
        out = self.layer2(out)

        out = F.pad(out, (3, 3), mode='constant', value=0)
        out = self.layer3(out)
        
        # Flatten output of convolutional layers
        out = out.view(int(out.size()[0]), -1)
        
        # run fully connected layers
        out = self.layer4(out)
        out = self.layer5(out)
        predictions = self.layer6(out)
                
        return predictions


from pytorch2keras.converter import pytorch_to_keras
from torch.autograd import Variable

model = ConvNet(num_classes=2)  # num_classes not given in the original report; arbitrary value just to instantiate
input_np = np.random.uniform(0, 1, (1, 4, 251))
input_var = Variable(torch.FloatTensor(input_np))
output = model(input_var)
k_model = pytorch_to_keras(model, input_var, (4, 251), verbose=True)
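
For reference, a quick check of the conv kernel shape on the PyTorch side (a sketch reusing the model above; the printed shape is what I'd expect from nn.Conv2d, i.e. (out_channels, in_channels, kernel_h, kernel_w)):

w = model.layer1_conv[0].weight   # first Conv2d of layer1_conv
print(w.shape)                    # expected: torch.Size([300, 1, 4, 19]) -> 1 input channel
# Keras stores Conv2D kernels as (kernel_h, kernel_w, in_channels, filters),
# so (4, 19, 1, 300) is my PyTorch weight and (4, 19, 4, 300) is what the
# converted layer expects, which suggests the mismatch is in the in_channels slot.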

Environment (please complete the following information):

  • OS: Windows 10
  • Python: Python 3
  • Version: [e.g. v0.1.11]

Any idea of what is going wrong? Thank you very much!

Gasp34 · May 30 '19

I fixed this by moving the unsqueeze outside the model so the input always has 1 channel (rough sketch after the traceback below); now I get an error on my first dropout layer:

    W = weights[weights_name].numpy().transpose()

KeyError: 'layer4.2.weight'
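
The change was roughly the following (a sketch, not my exact code: forward() no longer calls unsqueeze, and the extra channel dimension is added to the input before conversion):

input_np = np.random.uniform(0, 1, (1, 1, 4, 251))   # batch, channel=1, height=4, width=251
input_var = Variable(torch.FloatTensor(input_np))
output = model(input_var)                             # forward() now starts directly with F.pad
k_model = pytorch_to_keras(model, input_var, (1, 4, 251), verbose=True)  # shape excludes the batch dim, as above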

Gasp34 · May 30 '19

@Gasp34 I updated the tracing module. You can try converting your model with the new version of the converter.

gmalivenko · Jun 27 '19