pytorch-grad-cam
pytorch-grad-cam copied to clipboard
Unet++ Grad-CAM
Dear [jacobgil], I trained a model like UNet++ with 4 outputs; some of the code is shown below. I use:
target_layers = [model.final4] with GradCAM(model=model, target_layers=target_layers, use_cuda=torch.cuda.is_available()) as cam: targets = [ClassifierOutputTarget(0),ClassifierOutputTarget(0)] for i, ( data, labels) in enumerate(tbar): data = data.cuda() grayscale_cams = cam(input_tensor=data, targets=targets) It turns out: "An exception occurred in CAM with block: <class 'IndexError'>. Message: index 1 is out of bounds for dimension 1 with size 1" Could you help me analyze what is wrong in my code? Thank you very much
class UNetPlusPlus(nn.Module):
    """UNet++ (nested U-Net) segmentation network with optional deep supervision.

    Args:
        num_classes: number of output channels of each final 1x1 conv.
        input_channels: number of channels of the input image.
        block: callable ``block(in_channels, out_channels)`` returning an
            ``nn.Module`` used as the basic conv block of every node.
        num_blocks: sequence of 4 ints — how many blocks to stack at
            encoder depths 1..4 (depth 0 always uses a single block).
        nb_filter: sequence of 5 ints — channel widths per depth.
        deep_supervision: if True, ``forward`` returns a list of four
            outputs [output1, output2, output3, output4] (one per nested
            decoder level); otherwise a single output tensor.
    """

    def __init__(self, num_classes, input_channels, block, num_blocks,
                 nb_filter, deep_supervision=False):
        # BUG FIX: the original pasted code declared `def init` and called
        # `super(UNetPlusPlus, self).init()`. Neither is the real dunder:
        # the constructor body never ran and the super call would raise
        # AttributeError. Both must be `__init__`.
        super(UNetPlusPlus, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.deep_supervision = deep_supervision
        self.pool = nn.MaxPool2d(2, 2)
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.down = nn.Upsample(scale_factor=0.5, mode='bilinear', align_corners=True)
        self.up_4 = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)
        self.up_8 = nn.Upsample(scale_factor=8, mode='bilinear', align_corners=True)
        self.up_16 = nn.Upsample(scale_factor=16, mode='bilinear', align_corners=True)

        # Encoder backbone: conv{d}_0 operates at depth d.
        self.conv0_0 = self._make_layer(block, input_channels, nb_filter[0])
        self.conv1_0 = self._make_layer(block, nb_filter[0], nb_filter[1], num_blocks[0])
        self.conv2_0 = self._make_layer(block, nb_filter[1], nb_filter[2], num_blocks[1])
        self.conv3_0 = self._make_layer(block, nb_filter[2], nb_filter[3], num_blocks[2])
        self.conv4_0 = self._make_layer(block, nb_filter[3], nb_filter[4], num_blocks[3])

        # Nested decoder nodes conv{d}_{j}: input channels are the concat of
        # j skip connections at depth d (nb_filter[d] each) plus one
        # upsampled feature from depth d+1 (nb_filter[d+1]).
        self.conv0_1 = self._make_layer(block, nb_filter[0] + nb_filter[1], nb_filter[0])
        self.conv1_1 = self._make_layer(block, nb_filter[1] + nb_filter[2], nb_filter[1], num_blocks[0])
        self.conv2_1 = self._make_layer(block, nb_filter[2] + nb_filter[3], nb_filter[2], num_blocks[1])
        self.conv3_1 = self._make_layer(block, nb_filter[3] + nb_filter[4], nb_filter[3], num_blocks[2])
        self.conv0_2 = self._make_layer(block, nb_filter[0] * 2 + nb_filter[1], nb_filter[0])
        self.conv1_2 = self._make_layer(block, nb_filter[1] * 2 + nb_filter[2], nb_filter[1], num_blocks[0])
        self.conv2_2 = self._make_layer(block, nb_filter[2] * 2 + nb_filter[3], nb_filter[2], num_blocks[1])
        self.conv0_3 = self._make_layer(block, nb_filter[0] * 3 + nb_filter[1], nb_filter[0])
        self.conv1_3 = self._make_layer(block, nb_filter[1] * 3 + nb_filter[2], nb_filter[1], num_blocks[0])
        self.conv0_4 = self._make_layer(block, nb_filter[0] * 4 + nb_filter[1], nb_filter[0])

        # NOTE(review): the following layers are constructed but never used
        # in forward(); kept for backward compatibility with existing
        # checkpoints (state_dict keys) — confirm before removing.
        self.conv0_4_final = self._make_layer(block, nb_filter[0] * 5, nb_filter[0])
        self.conv0_4_1x1 = nn.Conv2d(nb_filter[4], nb_filter[0], kernel_size=1, stride=1)
        self.conv0_3_1x1 = nn.Conv2d(nb_filter[3], nb_filter[0], kernel_size=1, stride=1)
        self.conv0_2_1x1 = nn.Conv2d(nb_filter[2], nb_filter[0], kernel_size=1, stride=1)
        self.conv0_1_1x1 = nn.Conv2d(nb_filter[1], nb_filter[0], kernel_size=1, stride=1)

        if self.deep_supervision:
            # One prediction head per nested decoder level.
            self.final1 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
            self.final2 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
            self.final3 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
            self.final4 = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)
        else:
            self.final = nn.Conv2d(nb_filter[0], num_classes, kernel_size=1)

    def _make_layer(self, block, input_channels, output_channels, num_blocks=1):
        """Stack ``num_blocks`` instances of ``block``; only the first one
        changes the channel count."""
        layers = [block(input_channels, output_channels)]
        layers.extend(block(output_channels, output_channels)
                      for _ in range(num_blocks - 1))
        return nn.Sequential(*layers)

    def forward(self, input):
        # x{d}_{j}: feature map at encoder depth d, decoder level j.
        x0_0 = self.conv0_0(input)
        x1_0 = self.conv1_0(self.pool(x0_0))
        x0_1 = self.conv0_1(torch.cat([x0_0, self.up(x1_0)], 1))
        x2_0 = self.conv2_0(self.pool(x1_0))
        x1_1 = self.conv1_1(torch.cat([x1_0, self.up(x2_0)], 1))
        x0_2 = self.conv0_2(torch.cat([x0_0, x0_1, self.up(x1_1)], 1))
        x3_0 = self.conv3_0(self.pool(x2_0))
        x2_1 = self.conv2_1(torch.cat([x2_0, self.up(x3_0)], 1))
        x1_2 = self.conv1_2(torch.cat([x1_0, x1_1, self.up(x2_1)], 1))
        x0_3 = self.conv0_3(torch.cat([x0_0, x0_1, x0_2, self.up(x1_2)], 1))
        # Right-side (deepest) decoder path.
        x4_0 = self.conv4_0(self.pool(x3_0))
        x3_1 = self.conv3_1(torch.cat([x3_0, self.up(x4_0)], 1))
        x2_2 = self.conv2_2(torch.cat([x2_0, x2_1, self.up(x3_1)], 1))
        x1_3 = self.conv1_3(torch.cat([x1_0, x1_1, x1_2, self.up(x2_2)], 1))
        x0_4 = self.conv0_4(torch.cat([x0_0, x0_1, x0_2, x0_3, self.up(x1_3)], 1))

        if self.deep_supervision:
            output1 = self.final1(x0_1)
            output2 = self.final2(x0_2)
            output3 = self.final3(x0_3)
            output4 = self.final4(x0_4)
            return [output1, output2, output3, output4]
        else:
            output = self.final(x0_4)
            return output
I encountered the same problem. Did you find a solution for this? I also tried to visualize the Grad-CAM map of the UNet++ model.