Open
binzhangbin
opened this issue 3 years ago
•
15 comments
Traceback (most recent call last):
File "train.py", line 140, in
output = model(X_batch)
File "/home/zzp/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/home/zzp/2t/binzhang/Medical-Transformer/lib/models/axialnet.py", line 507, in forward
return self._forward_impl(x)
File "/home/zzp/2t/binzhang/Medical-Transformer/lib/models/axialnet.py", line 485, in _forward_impl
x1 = self.layer1(x)
File "/home/zzp/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/home/zzp/.local/lib/python3.8/site-packages/torch/nn/modules/container.py", line 139, in forward
input = module(input)
File "/home/zzp/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/home/zzp/2t/binzhang/Medical-Transformer/lib/models/axialnet.py", line 331, in forward
out = self.hight_block(out)
File "/home/zzp/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/home/zzp/2t/binzhang/Medical-Transformer/lib/models/axialnet.py", line 157, in forward
qr = torch.einsum('bgci,cij->bgij', q, q_embedding)
File "/home/zzp/.local/lib/python3.8/site-packages/torch/functional.py", line 299, in einsum
return _VF.einsum(equation, operands) # type: ignore[attr-defined]
RuntimeError: einsum(): operands do not broadcast with remapped shapes [original->remapped]: [2000, 8, 1, 500]->[2000, 8, 500, 1, 1] [1, 64, 64]->[1, 1, 64, 64, 1]
qr = np.einsum('bgci,cij->bgij', q, q_embedding)
File "<array_function internals>", line 6, in einsum
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/numpy/core/einsumfunc.py", line 1356, in einsum
return c_einsum(*operands, **kwargs)
ValueError: operands could not be broadcast together with remapped shapes [original->remapped]: (500,8,1,500)->(500,8,500,newaxis,1) (1,64,64)->(64,64,1)
(1000, 1000, 3)
(256, 256, 1)
torch.Size([500, 8, 1, 500])
torch.Size([1, 64, 64])
torch.Size([500, 8, 1, 500])
Traceback (most recent call last):
File "/media/lab549/Data/Medical-Transformer-main/train.py", line 140, in
output = model(X_batch)
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in call
result = self.forward(*input, **kwargs)
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 152, in forward
outputs = self.parallel_apply(replicas, inputs, kwargs)
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 162, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py", line 85, in parallel_apply
output.reraise()
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/_utils.py", line 394, in reraise
raise self.exc_type(msg)
RuntimeError: Caught RuntimeError in replica 0 on device 0.
Original Traceback (most recent call last):
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py", line 60, in _worker
output = module(*input, **kwargs)
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in call
result = self.forward(*input, **kwargs)
File "/media/lab549/Data/Medical-Transformer-main/lib/models/axialnet.py", line 717, in forward
return self._forward_impl(x)
File "/media/lab549/Data/Medical-Transformer-main/lib/models/axialnet.py", line 642, in _forward_impl
x1 = self.layer1(x)
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in call
result = self.forward(*input, **kwargs)
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/container.py", line 100, in forward
input = module(input)
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in call
result = self.forward(*input, **kwargs)
File "/media/lab549/Data/Medical-Transformer-main/lib/models/axialnet.py", line 337, in forward
out = self.hight_block(out)
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in call
result = self.forward(*input, **kwargs)
File "/media/lab549/Data/Medical-Transformer-main/lib/models/axialnet.py", line 161, in forward
qr = torch.einsum('bgci,cij->bgij', q, q_embedding)
File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/functional.py", line 241, in einsum
return torch._C._VariableFunctions.einsum(equation, operands)
RuntimeError: size of dimension does not match previous size, operand 1, dim 1
File "train.py", line 140, in
output = model(X_batch)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Medical-Transformer-main\lib\models\axialnet.py", line 507, in forward
return self._forward_impl(x)
File "E:\Medical-Transformer-main\lib\models\axialnet.py", line 485, in _forward_impl
x1 = self.layer1(x)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\nn\modules\container.py", line 139, in forward
input = module(input)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Medical-Transformer-main\lib\models\axialnet.py", line 331, in forward
out = self.hight_block(out)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Medical-Transformer-main\lib\models\axialnet.py", line 157, in forward
qr = torch.einsum('bgci,cij->bgij', q, q_embedding)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\functional.py", line 299, in einsum
return _VF.einsum(equation, operands) # type: ignore[attr-defined]
RuntimeError: einsum(): operands do not broadcast with remapped shapes [original->remapped]: [2560, 8, 1, 360]->[2560, 8, 360, 1, 1] [1, 64, 64]->[1, 1, 64, 64, 1]
Yes, with the current code setup, this network needs a fixed image size for all images in the dataset as input.
I came across a new error. You said before that this error happens when the input images have different sizes, but this time all of my images are the same size, and I still get this error.
Total_params: 1347266
Traceback (most recent call last):
File "train.py", line 140, in
output = model(X_batch)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Medical-Transformer-main\lib\models\axialnet.py", line 507, in forward
return self._forward_impl(x)
File "E:\Medical-Transformer-main\lib\models\axialnet.py", line 485, in _forward_impl
x1 = self.layer1(x)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\nn\modules\container.py", line 139, in forward
input = module(input)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Medical-Transformer-main\lib\models\axialnet.py", line 331, in forward
out = self.hight_block(out)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\nn\modules\module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "E:\Medical-Transformer-main\lib\models\axialnet.py", line 157, in forward
qr = torch.einsum('bgci,cij->bgij', q, q_embedding)
File "D:\anaconda1\envs\Medical-transformer\lib\site-packages\torch\functional.py", line 299, in einsum
return _VF.einsum(equation, operands) # type: ignore[attr-defined]
RuntimeError: einsum(): operands do not broadcast with remapped shapes [original->remapped]: [2560, 8, 1, 360]->[2560, 8, 360, 1, 1] [1, 180, 180]->[1, 1, 180, 180, 1]
Yes, with the current code setup, this network needs a fixed image size for all images in the dataset as input.
I came across a new error. You said before that this error happens when the input images have different sizes, but this time all of my images are the same size, and I still get this error. My images are 1280×720 pixels.
1000, 1000, 3) (256, 256, 1) torch.Size([500, 8, 1, 500]) torch.Size([1, 64, 64]) torch.Size([500, 8, 1, 500]) Traceback (most recent call last): File "/media/lab549/Data/Medical-Transformer-main/train.py", line 140, in output = model(X_batch) File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in call result = self.forward(*input, **kwargs) File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 152, in forward outputs = self.parallel_apply(replicas, inputs, kwargs) File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/parallel/data_parallel.py", line 162, in parallel_apply return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py", line 85, in parallel_apply output.reraise() File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/_utils.py", line 394, in reraise raise self.exc_type(msg) RuntimeError: Caught RuntimeError in replica 0 on device 0. 
Original Traceback (most recent call last): File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/parallel/parallel_apply.py", line 60, in _worker output = module(*input, **kwargs) File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in call result = self.forward(*input, **kwargs) File "/media/lab549/Data/Medical-Transformer-main/lib/models/axialnet.py", line 717, in forward return self._forward_impl(x) File "/media/lab549/Data/Medical-Transformer-main/lib/models/axialnet.py", line 642, in _forward_impl x1 = self.layer1(x) File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in call result = self.forward(*input, **kwargs) File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/container.py", line 100, in forward input = module(input) File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in call result = self.forward(*input, **kwargs) File "/media/lab549/Data/Medical-Transformer-main/lib/models/axialnet.py", line 337, in forward out = self.hight_block(out) File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in call result = self.forward(*input, **kwargs) File "/media/lab549/Data/Medical-Transformer-main/lib/models/axialnet.py", line 161, in forward qr = torch.einsum('bgci,cij->bgij', q, q_embedding) File "/home/lab549/anaconda3/envs/medt/lib/python3.6/site-packages/torch/functional.py", line 241, in einsum return torch._C._VariableFunctions.einsum(equation, operands) RuntimeError: size of dimension does not match previous size, operand 1, dim 1