test.py
The following output repeats endlessly (continuously looping):

```
We use the scale: 1
-------test-----------
Load the model from ./model\ft_ResNet50\net_119.pth
1 -> 3:
```
Are you running test.py?
Please check https://github.com/layumi/University1652-Baseline/blob/master/test.py#L229
Did you modify the code?
There are no `for` or `while` loops in that function.
Hello, line 229 represents 'query_satellite' 1 -> 'gallery_drone' 3, but there is still a loop problem in the output when running test.py.
def extract_feature(model, dataloaders, view_index=1):
    """Extract L2-normalised features for every image in ``dataloaders``.

    Each batch is evaluated twice (original and horizontally flipped image)
    and, for every scale in the file-level ``ms`` list, the selected view
    output of ``model`` is accumulated into ``ff`` before normalisation.

    Args:
        model: multi-view network; returns 2 outputs when ``opt.views == 2``
            and 3 outputs when ``opt.views == 3``.
        dataloaders: iterable yielding ``(img, label)`` batches, ``img`` a
            4-D tensor ``(n, c, h, w)``.
        view_index: which view branch of the model to read (1, 2 or 3).

    Returns:
        torch.FloatTensor of shape ``(num_images, feature_dim)`` on the CPU.

    Raises:
        ValueError: if ``opt.views`` / ``view_index`` is not a supported
            combination. (Previously this case fell through and surfaced as
            "UnboundLocalError: local variable 'outputs' referenced before
            assignment" at ``ff += outputs``.)
    """
    features = torch.FloatTensor()
    count = 0
    for data in dataloaders:
        img, label = data
        n, c, h, w = img.size()
        count += n
        print(count)
        # NOTE(review): feature width 512 is hard-coded; confirm it matches
        # the model's output dimension.
        ff = torch.FloatTensor(n, 512).zero_().cuda()
        # Pass 0: original image; pass 1: horizontal flip.
        for i in range(2):
            if i == 1:
                img = fliplr(img)
            input_img = Variable(img.cuda())
            for scale in ms:
                if scale != 1:
                    # bicubic is only available in pytorch >= 1.1
                    input_img = nn.functional.interpolate(
                        input_img, scale_factor=scale,
                        mode='bilinear', align_corners=False)
                outputs = None
                if opt.views == 2:
                    if view_index == 1:
                        outputs, _ = model(input_img, None)
                    elif view_index == 2:
                        _, outputs = model(None, input_img)
                elif opt.views == 3:
                    if view_index == 1:
                        outputs, _, _ = model(input_img, None, None)
                    elif view_index == 2:
                        _, outputs, _ = model(None, input_img, None)
                    elif view_index == 3:
                        _, _, outputs = model(None, None, input_img)
                if outputs is None:
                    # Fail loudly instead of raising UnboundLocalError below.
                    raise ValueError(
                        'unsupported combination: opt.views=%s, view_index=%s'
                        % (opt.views, view_index))
                ff += outputs
        # norm feature
        if opt.PCB:
            # feature size (n,2048,6)
            # 1. To treat every part equally, calculate the norm for every
            #    2048-dim part feature.
            # 2. To keep the cosine score == 1, sqrt(6) is added to norm the
            #    whole feature (2048*6).
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)
            ff = ff.div(fnorm.expand_as(ff))
            ff = ff.view(ff.size(0), -1)
        else:
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
        features = torch.cat((features, ff.data.cpu()), 0)
    return features
UnboundLocalError: local variable 'outputs' referenced before assignment
def extract_feature(model, dataloaders, view_index=1):
    """Extract L2-normalised features for every image in ``dataloaders``.

    Same function as quoted above, reformatted for readability. The crash
    happens because ``outputs`` is only assigned inside the
    ``opt.views`` / ``view_index`` branches; an unhandled combination leaves
    it unbound when ``ff += outputs`` runs. The guard below raises a clear
    error instead.

    Raises:
        ValueError: for an unsupported ``opt.views`` / ``view_index`` pair.
    """
    features = torch.FloatTensor()
    count = 0
    for data in dataloaders:
        img, label = data
        n, c, h, w = img.size()
        count += n
        print(count)
        ff = torch.FloatTensor(n, 512).zero_().cuda()
        # Pass 0: original image; pass 1: horizontal flip.
        for i in range(2):
            if i == 1:
                img = fliplr(img)
            input_img = Variable(img.cuda())
            for scale in ms:
                if scale != 1:
                    # bicubic is only available in pytorch >= 1.1
                    input_img = nn.functional.interpolate(
                        input_img, scale_factor=scale,
                        mode='bilinear', align_corners=False)
                outputs = None
                if opt.views == 2:
                    if view_index == 1:
                        outputs, _ = model(input_img, None)
                    elif view_index == 2:
                        _, outputs = model(None, input_img)
                elif opt.views == 3:
                    if view_index == 1:
                        outputs, _, _ = model(input_img, None, None)
                    elif view_index == 2:
                        _, outputs, _ = model(None, input_img, None)
                    elif view_index == 3:
                        _, _, outputs = model(None, None, input_img)
                if outputs is None:
                    raise ValueError(
                        'unsupported combination: opt.views=%s, view_index=%s'
                        % (opt.views, view_index))
                ff += outputs
        # norm feature
        if opt.PCB:
            # feature size (n,2048,6)
            # 1. To treat every part equally, calculate the norm for every
            #    2048-dim part feature.
            # 2. To keep the cosine score == 1, sqrt(6) is added to norm the
            #    whole feature (2048*6).
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)
            ff = ff.div(fnorm.expand_as(ff))
            ff = ff.view(ff.size(0), -1)
        else:
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
        features = torch.cat((features, ff.data.cpu()), 0)
    return features

# Reported traceback (fused onto the pasted code in the original message):
# UnboundLocalError: local variable 'outputs' referenced before assignment
I have the same problem. How did you fix it?
I set num_workers=0, but when count=701 there is still an "UnboundLocalError: local variable 'outputs' referenced before assignment", which remains unresolved.
Hi @Mayuzhuo0323 @ckmessi @hominsu
Thank you @ps-star
If you use Windows, please make sure your num_workers=0. This is a well-known case for PyTorch on Windows.
@ps-star Could you provide more information about bug logs and your system?
