
About XBNETRegressor

jckkvs opened this issue 1 year ago · 0 comments

Have you tested XBNETRegressor? I wrote some code using XBNETRegressor, but I get a RuntimeError.

import torch
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from XBNet.training_utils import training, predict
from XBNet.models import XBNETClassifier, XBNETRegressor
from XBNet.run import run_XBNET

# X and y (features and a continuous target) are loaded earlier, not shown here
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

model = XBNETRegressor(X_train, y_train, 2)

criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

param_grid = {}
m, acc, lo, val_ac, val_lo = run_XBNET(X_train, X_test, y_train, y_test, model, criterion, optimizer, 32, 300)
print(predict(m, x_data.to_numpy()[0, :]))

RuntimeError                              Traceback (most recent call last)
Input In [15], in <cell line: 1>()
----> 1 m, acc, lo, val_ac, val_lo = run_XBNET(X_train, X_test, y_train, y_test, model, criterion, optimizer, 32, 300)
      2 print(predict(m, x_data.to_numpy()[0, :]))

File ~\Anaconda3\envs\py310\lib\site-packages\XBNet\run.py:40, in run_XBNET(X_train, X_test, y_train, y_test, model, criterion, optimizer, batch_size, epochs, save)
     38 trainDataload = DataLoader(Data(X_train, y_train), batch_size=batch_size)
     39 testDataload = DataLoader(Data(X_test, y_test), batch_size=batch_size)
---> 40 acc, lo, val_ac, val_lo = training(model, trainDataload, testDataload,
     41                                    criterion, optimizer, epochs, save=save)
     42 return model, acc, lo, val_ac, val_lo

File ~\Anaconda3\envs\py310\lib\site-packages\XBNet\training_utils.py:44, in training(model, trainDataload, testDataload, criterion, optimizer, epochs, save)
     42 loss = criterion(y_pred, out.long())
     43 running_loss += loss.item()
---> 44 loss.backward()
     45 optimizer.step()
     46 optimizer.zero_grad()

File ~\Anaconda3\envs\py310\lib\site-packages\torch\_tensor.py:487, in Tensor.backward(self, gradient, retain_graph, create_graph, inputs)
    477 if has_torch_function_unary(self):
    478     return handle_torch_function(
    479         Tensor.backward,
    480         (self,),
    (...)
    485         inputs=inputs,
    486     )
--> 487 torch.autograd.backward(
    488     self, gradient, retain_graph, create_graph, inputs=inputs
    489 )

File ~\Anaconda3\envs\py310\lib\site-packages\torch\autograd\__init__.py:200, in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
    195     retain_graph = create_graph
    197 # The reason we repeat same the comment below is that
    198 # some Python versions print out the first line of a multi-line function
    199 # calls in the traceback and some print out the last line
--> 200 Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
    201     tensors, grad_tensors, retain_graph, create_graph, inputs,
    202     allow_unreachable=True, accumulate_grad=True)

RuntimeError: Found dtype Long but expected Float
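
As far as I can tell, the failing frame is training_utils.py line 42, where the training loop casts the batch targets with out.long() before calling the criterion. A Long cast makes sense for CrossEntropyLoss in the classification path, but torch.nn.MSELoss needs Float targets, so the regression path breaks. The same failure can be reproduced without XBNet at all (a minimal sketch; the tensor values are made up):

import torch

pred = torch.randn(4, requires_grad=True)       # model output, Float
target = torch.tensor([0.7, 1.2, 0.1, 2.3])     # regression targets

loss = torch.nn.MSELoss()(pred, target.long())  # same cast the training loop applies
loss.backward()  # RuntimeError: Found dtype Long but expected Float

Depending on the PyTorch version, the error shows up at the loss call itself or, as in my traceback, only at loss.backward(). Until the cast is fixed in the library, one possible workaround is passing a criterion that converts the targets back to Float (FloatTargetMSELoss is my own name, not part of XBNet):

import torch

class FloatTargetMSELoss(torch.nn.MSELoss):
    # The training loop hands the criterion Long targets, so cast them
    # back to Float before computing the mean squared error.
    def forward(self, input, target):
        return super().forward(input, target.float())

criterion = FloatTargetMSELoss()

Note the limitation: by the time the criterion runs, out.long() has already truncated any fractional part of the targets, so this is only safe for integer-valued targets. For real-valued regression, the cast in training_utils.py itself would have to become out.float() when the model is an XBNETRegressor.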

jckkvs · Jun 02 '23 08:06