pytorch-fm
RuntimeError: Expected tensor for argument #1 'indices' to have scalar type Long
```
RuntimeError                              Traceback (most recent call last)

H:\Anaconda\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    720             result = self._slow_forward(*input, **kwargs)
    721         else:
--> 722             result = self.forward(*input, **kwargs)
    723         for hook in itertools.chain(
    724                 _global_forward_hooks.values(),

~\Desktop\量化炒股\pytorch-fm-master\torchfm\model\fm.py in forward(self, x)
     22         :param x: Long tensor of size (batch_size, num_fields)
     23         """
---> 24         x = self.linear(x) + self.fm(self.embedding(x))
     25         return torch.sigmoid(x.squeeze(1))

H:\Anaconda\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    720             result = self._slow_forward(*input, **kwargs)
    721         else:
--> 722             result = self.forward(*input, **kwargs)
    723         for hook in itertools.chain(
    724                 _global_forward_hooks.values(),

~\Desktop\量化炒股\pytorch-fm-master\torchfm\layer.py in forward(self, x)
     17         """
     18         x = x + x.new_tensor(self.offsets).unsqueeze(0)
---> 19         return torch.sum(self.fc(x), dim=1) + self.bias
     20
     21

H:\Anaconda\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
    720             result = self._slow_forward(*input, **kwargs)
    721         else:
--> 722             result = self.forward(*input, **kwargs)
    723         for hook in itertools.chain(
    724                 _global_forward_hooks.values(),

H:\Anaconda\lib\site-packages\torch\nn\modules\sparse.py in forward(self, input)
    124         return F.embedding(
    125             input, self.weight, self.padding_idx, self.max_norm,
--> 126             self.norm_type, self.scale_grad_by_freq, self.sparse)
    127
    128     def extra_repr(self) -> str:

H:\Anaconda\lib\site-packages\torch\nn\functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
   1812         # remove once script supports set_grad_enabled
   1813         _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1814     return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
   1815
   1816

RuntimeError: Expected tensor for argument #1 'indices' to have scalar type Long; but got torch.IntTensor instead (while checking arguments for embedding)
```
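The failing call is the embedding lookup: `torch.embedding` expects its `indices` argument to have dtype int64 (`torch.long`), but the batch coming out of the DataLoader here is int32. A minimal sketch of the mismatch, using plain `nn.Embedding` rather than anything from this repo (note the rejection of int32 indices is version-dependent; the PyTorch version in the traceback above rejects them, while newer releases may accept them):

```python
import torch
import torch.nn as nn

emb = nn.Embedding(num_embeddings=10, embedding_dim=4)

idx_int32 = torch.tensor([[1, 2], [3, 4]], dtype=torch.int32)
# emb(idx_int32)  # RuntimeError: Expected tensor ... scalar type Long; but got torch.IntTensor

idx_int64 = idx_int32.long()  # cast indices to int64
out = emb(idx_int64)          # works: shape (2, 2, 4)
print(out.shape)
```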
The fix is to cast the batch to `long` right after moving it to the device:

```python
fields, target = fields.to(device).long(), target.to(device).long()
```
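If you are using a training loop like the one in the repo's `examples/main.py`, the cast goes where the batch is moved to the device. A minimal sketch, assuming a loop of roughly this shape (the repo's actual loop may differ in details such as progress bars and logging):

```python
def train(model, optimizer, data_loader, criterion, device):
    model.train()
    for fields, target in data_loader:
        # Cast feature indices (and labels) to int64 before the forward pass,
        # so nn.Embedding inside the model receives Long indices.
        fields, target = fields.to(device).long(), target.to(device).long()
        y = model(fields)
        loss = criterion(y, target.float())  # BCELoss expects float targets
        model.zero_grad()
        loss.backward()
        optimizer.step()
```

Alternatively, you can fix this at the dataset level by having your `Dataset` return int64 arrays (e.g. via NumPy's `astype(np.int64)`), so no per-batch cast is needed.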