
TransUNet example error

Open · ahmadSum1 opened this issue 2 years ago · 1 comment

https://github.com/04RR/SOTA-Vision/blob/bf33a3d7025490e1f4afd53f276f91b2e3ed308e/README.md?plain=1#L41

I am getting a memory error here:

```
RuntimeError: [enforce fail at CPUAllocator.cpp:61] . DefaultCPUAllocator: can't allocate memory: you tried to allocate 154618822656 bytes. Error code 12 (Cannot allocate memory)
```
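For reference, this is the notebook cell that triggers it, taken from the README example linked above (reconstructed from the traceback below):

```python
import torch
from TransUNet import TransUNet

# Constructor arguments exactly as in the README example
model = TransUNet(
    img_dim=128,
    patch_dim=16,
    in_channels=3,
    classes=2,
    blocks=6,
    heads=8,
    linear_dim=1024,
)

x = torch.randn(1, 3, 128, 128)
model(x)  # never reached: construction of the model already fails
```

Full traceback: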

```
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
/mundus/sahmed035/SOTA-Vision/test.ipynb Cell 2' in <cell line: 4>()
      1 import torch
      2 from TransUNet import TransUNet
----> 4 model = TransUNet(
      5     img_dim= 128,
      6     patch_dim= 16,
      7     in_channels= 3,
      8     classes= 2,
      9     blocks= 6,
     10     heads= 8,
     11     linear_dim= 1024
     12 )
     14 x = torch.randn(1, 3, 128, 128)
     15 model(x)

File /baie/nfs-cluster-1/mundus/sahmed035/SOTA-Vision/TransUNet.py:155, in TransUNet.__init__(self, img_dim, patch_dim, in_channels, classes, blocks, heads, linear_dim)
    143 def __init__(
    144     self,
    145     img_dim,
    (...)
    151     linear_dim=1024,
    152 ):
    153     super(TransUNet, self).__init__()
--> 155     self.encoder = TransUNetEncoder(
    156         img_dim, patch_dim, in_channels, classes, blocks, heads, linear_dim
    157     )
    159     self.decoder1 = TransUNetDecoderUnit(1024, 256)
    160     self.decoder2 = TransUNetDecoderUnit(512, 128)

File /baie/nfs-cluster-1/mundus/sahmed035/SOTA-Vision/TransUNet.py:90, in TransUNetEncoder.__init__(self, img_dim, patch_dim, in_channels, classes, layers, heads, linear_dim)
     85 self.patch_dim = patch_dim
     87 self.layer1 = nn.Conv2d(
     88     in_channels, self.channels, kernel_size=7, stride=2, padding=3, bias=False
     89 )
---> 90 self.layer2 = BottleNeckUnit(self.channels, self.channels * 2)
     91 self.layer3 = BottleNeckUnit(self.channels * 2, self.channels * 4)
     92 self.layer4 = BottleNeckUnit(self.channels * 4, self.channels * 8)

File /baie/nfs-cluster-1/mundus/sahmed035/SOTA-Vision/TransUNet.py:32, in BottleNeckUnit.__init__(self, in_channels, out_channels, base_width, stride)
     19     self.downsample = nn.Identity()
     21 gamma = (base_width // 64) * out_channels
     23 self.layer = nn.Sequential(
     24     nn.Conv2d(
     25         in_channels,
     26         gamma * out_channels,
     27         kernel_size=1,
     28         stride=stride,
     29         bias=False,
     30     ),
     31     nn.BatchNorm2d(gamma * out_channels),
---> 32     nn.Conv2d(
     33         gamma * out_channels,
     34         gamma * out_channels,
     35         kernel_size=3,
     36         stride=stride,
     37         padding=1,
     38         groups=1,
     39         bias=False,
     40         dilation=1,
     41     ),
     42     nn.BatchNorm2d(gamma * out_channels),
     43     nn.Conv2d(
     44         gamma * out_channels,
     45         out_channels,
     46         kernel_size=1,
     47         stride=stride,
     48         bias=False,
     49     ),
     50     nn.BatchNorm2d(out_channels),
     51 )

File ~/miniconda3/envs/model_drift/lib/python3.9/site-packages/torch/nn/modules/conv.py:433, in Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, device, dtype)
    431 padding_ = padding if isinstance(padding, str) else _pair(padding)
    432 dilation_ = _pair(dilation)
--> 433 super(Conv2d, self).__init__(
    434     in_channels, out_channels, kernel_size_, stride_, padding_, dilation_,
    435     False, _pair(0), groups, bias, padding_mode, **factory_kwargs)

File ~/miniconda3/envs/model_drift/lib/python3.9/site-packages/torch/nn/modules/conv.py:131, in _ConvNd.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, transposed, output_padding, groups, bias, padding_mode, device, dtype)
    128     self.weight = Parameter(torch.empty(
    129         (in_channels, out_channels // groups, *kernel_size), **factory_kwargs))
    130 else:
--> 131     self.weight = Parameter(torch.empty(
    132         (out_channels, in_channels // groups, *kernel_size), **factory_kwargs))
    133 if bias:
    134     self.bias = Parameter(torch.empty(out_channels, **factory_kwargs))

RuntimeError: [enforce fail at CPUAllocator.cpp:61] . DefaultCPUAllocator: can't allocate memory: you tried to allocate 154618822656 bytes. Error code 12 (Cannot allocate memory)
```
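For what it's worth, the failed allocation size matches the weight tensor of the 3×3 convolution at line 32 of `TransUNet.py`. In `BottleNeckUnit`, `gamma = (base_width // 64) * out_channels`, so if `base_width` defaults to 64 (the torchvision ResNet convention; the default isn't visible in the traceback, so this is an inference), then `gamma == out_channels` and the middle convolution is built with `gamma * out_channels == out_channels ** 2` channels. For `layer2` that works out to 256² = 65,536 channels, and the arithmetic reproduces the exact byte count from the error:

```python
# Assumes base_width == 64 and out_channels == 256 (i.e. self.channels == 128
# in TransUNetEncoder); neither value is shown in the traceback, but they are
# the only ones consistent with the numbers below.
out_channels = 256
gamma = (64 // 64) * out_channels       # gamma == 256
width = gamma * out_channels            # 256 * 256 = 65,536 channels
weight_floats = width * width * 3 * 3   # 3x3 conv weight shape: (out, in, 3, 3)
print(weight_floats * 4)                # 154618822656 bytes of float32,
                                        # exactly the failed allocation
```

A plausible fix is to treat `gamma` as a pure width multiplier, the way torchvision's `Bottleneck` uses `base_width / 64`, rather than multiplying by `out_channels` twice. A minimal sketch (untested, residual/downsample path from line 19 omitted, and the widths the author actually intended may differ):

```python
import torch
import torch.nn as nn

class BottleNeckUnit(nn.Module):
    """Sketch: inner width scales with base_width // 64 instead of out_channels."""
    def __init__(self, in_channels, out_channels, base_width=64, stride=1):
        super().__init__()
        width = (base_width // 64) * out_channels  # == out_channels by default
        self.layer = nn.Sequential(
            nn.Conv2d(in_channels, width, kernel_size=1, bias=False),
            nn.BatchNorm2d(width),
            nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(width),
            nn.Conv2d(width, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x):
        return self.layer(x)

x = torch.randn(1, 128, 64, 64)
print(BottleNeckUnit(128, 256)(x).shape)  # torch.Size([1, 256, 64, 64])
```

Note also that the original passes `stride=stride` to all three convolutions, whereas torchvision strides only the 3×3 one; keeping the 1×1 convolutions at stride 1, as above, avoids downsampling three times per block.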

ahmadSum1 · May 08 '22 01:05