aquvitae icon indicating copy to clipboard operation
aquvitae copied to clipboard

AttributeError: 'tuple' object has no attribute 'ndimension'

Open GeneralJing opened this issue 4 years ago • 3 comments

lb.shape: (640, 1280) main() File "tools/train_aquvitae.py", line 85, in main student = Dist( File "/home/xxx/anaconda3/envs/tp/lib/python3.8/site-packages/aquvitae/dist.py", line 28, in dist return _torch_dist( File "/home/xxx/anaconda3/envs/tp/lib/python3.8/site-packages/aquvitae/dist.py", line 118, in _torch_dist loss = algo.teach_step(x, y) File "/home/xxx/anaconda3/envs/tp/lib/python3.8/site-packages/aquvitae/algo/st_torch.py", line 30, in teach_step self.logging_metrics(y, s_logits) File "/home/xxx/anaconda3/envs/tp/lib/python3.8/site-packages/aquvitae/algo/base_torch.py", line 23, in logging_metrics self.metrics[name].update([logits, labels]) File "/home/xxx/anaconda3/envs/tp/lib/python3.8/site-packages/ignite/metrics/metric.py", line 605, in wrapper func(self, *args, **kwargs) File "/home/xxx/anaconda3/envs/tp/lib/python3.8/site-packages/ignite/metrics/accuracy.py", line 147, in update self._check_shape(output) File "/home/xxx/anaconda3/envs/tp/lib/python3.8/site-packages/ignite/metrics/accuracy.py", line 30, in _check_shape if not (y.ndimension() == y_pred.ndimension() or y.ndimension() + 1 == y_pred.ndimension()): AttributeError: 'tuple' object has no attribute 'ndimension' Training - 1/3000 [ ] ELP: 00:01

GeneralJing avatar Oct 29 '21 01:10 GeneralJing

Can you provide a pytorch example that generates train_ds?

GeneralJing avatar Oct 29 '21 02:10 GeneralJing

Here is the function that I use.

def get_data_loader(cfg, mode='train', distributed=True):
    """Build a DataLoader for training or validation.

    Args:
        cfg: config object providing dataset paths, batch sizes, augmentation
            parameters, and (for distributed training) ``ims_per_gpu`` and
            ``max_iter``.
        mode: ``'train'`` or ``'val'`` — selects transforms, annotation file,
            batch size, shuffling and drop-last behaviour.
        distributed: when True, wrap the dataset in a distributed sampler;
            requires ``torch.distributed`` to be initialized.

    Returns:
        A ``torch.utils.data.DataLoader`` over the selected dataset.

    Raises:
        ValueError: if ``mode`` is not ``'train'`` or ``'val'`` (previously
            this fell through to an UnboundLocalError further down).
    """
    if mode == 'train':
        trans_func = TransformationTrain(cfg.scales, cfg.cropsize)
        batchsize = cfg.ims_per_gpu
        annpath = cfg.train_im_anns
        shuffle = True
        drop_last = True
    elif mode == 'val':
        trans_func = TransformationVal()
        batchsize = cfg.eval_ims_per_gpu
        annpath = cfg.val_im_anns
        shuffle = False
        drop_last = False
    else:
        raise ValueError(f"mode must be 'train' or 'val', got {mode!r}")

    # SECURITY: eval() on a config-supplied string executes arbitrary code if
    # the config is not trusted. Prefer an explicit registry, e.g.
    # DATASETS[cfg.dataset](...). Kept as-is to preserve existing behaviour.
    ds = eval(cfg.dataset)(cfg.im_root, annpath, trans_func=trans_func, mode=mode)

    if distributed:
        assert dist.is_available(), "dist should be initialzed"
        if mode == 'train':
            # Training draws a fixed total number of samples (with repetition)
            # so every rank sees exactly max_iter batches.
            assert cfg.max_iter is not None
            n_train_imgs = cfg.ims_per_gpu * dist.get_world_size() * cfg.max_iter
            sampler = RepeatedDistSampler(ds, n_train_imgs, shuffle=shuffle)
        else:
            sampler = torch.utils.data.distributed.DistributedSampler(
                ds, shuffle=shuffle)
        batchsampler = torch.utils.data.sampler.BatchSampler(
            sampler, batchsize, drop_last=drop_last
        )
        # batch_sampler is mutually exclusive with batch_size/shuffle/drop_last,
        # hence the separate non-distributed branch below.
        dl = DataLoader(
            ds,
            batch_sampler=batchsampler,
            num_workers=4,
            pin_memory=True,
        )
    else:
        dl = DataLoader(
            ds,
            batch_size=batchsize,
            shuffle=shuffle,
            drop_last=drop_last,
            num_workers=4,
            pin_memory=True,
        )
    return dl

GeneralJing avatar Oct 29 '21 02:10 GeneralJing

Hello! Have you solved this problem? I came across the same error after 1 epoch of training with ignite.

Ko-vey avatar May 24 '22 13:05 Ko-vey