ValueError: need at least one array to concatenate when using the Shin2017A dataset
Expected Result
Run a CrossSession LeftRightImagery classification on the Shin2017A dataset.
Current Result
Shin2017A-CrossSession: 0%| | 0/29 [01:04<?, ?it/s]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[9], line 3
1 pipes = {}
2 pipes["EEGNetV4"] = make_pipeline(clf)
----> 3 results = evaluation.process(pipelines=pipes)
4 print("============results===========")
5 print(results)
File /opt/conda/lib/python3.10/site-packages/moabb/evaluations/base.py:188, in BaseEvaluation.process(self, pipelines, param_grid, postprocess_pipeline)
179 # (we only keep the pipeline for the first frequency band, better ideas?)
181 results = self.evaluate(
182 dataset,
183 pipelines,
(...)
186 postprocess_pipeline=postprocess_pipeline,
187 )
--> 188 for res in results:
189 self.push_result(res, pipelines, process_pipeline)
191 return self.results.to_dataframe(
192 pipelines=pipelines, process_pipeline=process_pipeline
193 )
File /opt/conda/lib/python3.10/site-packages/moabb/evaluations/evaluations.py:548, in CrossSessionEvaluation.evaluate(self, dataset, pipelines, param_grid, process_pipeline, postprocess_pipeline)
542 save_model_cv(
543 model=cvclf,
544 save_path=model_save_path,
545 cv_index=str(cv_ind),
546 )
547 else:
--> 548 result = _fit_and_score(
549 clone(grid_clf),
550 X,
551 y,
552 scorer,
553 train,
554 test,
555 verbose=False,
556 parameters=None,
557 fit_params=None,
558 error_score=self.error_score,
559 return_estimator=True,
560 )
561 score = result["test_scores"]
562 model_list = result["estimator"]
File /opt/conda/lib/python3.10/site-packages/sklearn/model_selection/_validation.py:686, in _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score, return_parameters, return_n_test_samples, return_times, return_estimator, split_progress, candidate_progress, error_score)
684 estimator.fit(X_train, **fit_params)
685 else:
--> 686 estimator.fit(X_train, y_train, **fit_params)
688 except Exception:
689 # Note fit time as time until error
690 fit_time = time.time() - start_time
File /opt/conda/lib/python3.10/site-packages/sklearn/pipeline.py:405, in Pipeline.fit(self, X, y, **fit_params)
403 if self._final_estimator != "passthrough":
404 fit_params_last_step = fit_params_steps[self.steps[-1][0]]
--> 405 self._final_estimator.fit(Xt, y, **fit_params_last_step)
407 return self
File /opt/conda/lib/python3.10/site-packages/braindecode/eegneuralnet.py:384, in _EEGNeuralNet.fit(self, X, y, **fit_params)
382 self._set_signal_args(X, y, classes=None)
383 self.signal_args_set_ = True
--> 384 return super().fit(X=X, y=y, **fit_params)
File /opt/conda/lib/python3.10/site-packages/skorch/classifier.py:165, in NeuralNetClassifier.fit(self, X, y, **fit_params)
154 """See ``NeuralNet.fit``.
155
156 In contrast to ``NeuralNet.fit``, ``y`` is non-optional to
(...)
160
161 """
162 # pylint: disable=useless-super-delegation
163 # this is actually a pylint bug:
164 # https://github.com/PyCQA/pylint/issues/1085
--> 165 return super(NeuralNetClassifier, self).fit(X, y, **fit_params)
File /opt/conda/lib/python3.10/site-packages/skorch/net.py:1319, in NeuralNet.fit(self, X, y, **fit_params)
1316 if not self.warm_start or not self.initialized_:
1317 self.initialize()
-> 1319 self.partial_fit(X, y, **fit_params)
1320 return self
File /opt/conda/lib/python3.10/site-packages/braindecode/eegneuralnet.py:334, in _EEGNeuralNet.partial_fit(self, X, y, classes, **fit_params)
332 self._set_signal_args(X, y, classes)
333 self.signal_args_set_ = True
--> 334 return super().partial_fit(X=X, y=y, classes=classes, **fit_params)
File /opt/conda/lib/python3.10/site-packages/skorch/net.py:1278, in NeuralNet.partial_fit(self, X, y, classes, **fit_params)
1276 self.notify('on_train_begin', X=X, y=y)
1277 try:
-> 1278 self.fit_loop(X, y, **fit_params)
1279 except KeyboardInterrupt:
1280 pass
File /opt/conda/lib/python3.10/site-packages/skorch/net.py:1196, in NeuralNet.fit_loop(self, X, y, epochs, **fit_params)
1190 self.run_single_epoch(iterator_train, training=True, prefix="train",
1191 step_fn=self.train_step, **fit_params)
1193 self.run_single_epoch(iterator_valid, training=False, prefix="valid",
1194 step_fn=self.validation_step, **fit_params)
-> 1196 self.notify("on_epoch_end", **on_epoch_kwargs)
1197 return self
File /opt/conda/lib/python3.10/site-packages/skorch/net.py:386, in NeuralNet.notify(self, method_name, **cb_kwargs)
384 getattr(self, method_name)(self, **cb_kwargs)
385 for _, cb in self.callbacks_:
--> 386 getattr(cb, method_name)(self, **cb_kwargs)
File /opt/conda/lib/python3.10/site-packages/skorch/callbacks/scoring.py:489, in EpochScoring.on_epoch_end(self, net, dataset_train, dataset_valid, **kwargs)
486 return
488 with _cache_net_forward_iter(net, self.use_caching, y_pred) as cached_net:
--> 489 current_score = self._scoring(cached_net, X_test, y_test)
491 self._record_score(net.history, current_score)
File /opt/conda/lib/python3.10/site-packages/skorch/callbacks/scoring.py:181, in ScoringBase._scoring(self, net, X_test, y_test)
178 """Resolve scoring and apply it to data. Use cached prediction
179 instead of running inference again, if available."""
180 scorer = check_scoring(net, self.scoring_)
--> 181 return scorer(net, X_test, y_test)
File /opt/conda/lib/python3.10/site-packages/sklearn/metrics/_scorer.py:234, in _BaseScorer.__call__(self, estimator, X, y_true, sample_weight)
211 def __call__(self, estimator, X, y_true, sample_weight=None):
212 """Evaluate predicted target values for X relative to y_true.
213
214 Parameters
(...)
232 Score function applied to prediction of estimator on X.
233 """
--> 234 return self._score(
235 partial(_cached_call, None),
236 estimator,
237 X,
238 y_true,
239 sample_weight=sample_weight,
240 )
File /opt/conda/lib/python3.10/site-packages/sklearn/metrics/_scorer.py:276, in _PredictScorer._score(self, method_caller, estimator, X, y_true, sample_weight)
248 def _score(self, method_caller, estimator, X, y_true, sample_weight=None):
249 """Evaluate predicted target values for X relative to y_true.
250
251 Parameters
(...)
273 Score function applied to prediction of estimator on X.
274 """
--> 276 y_pred = method_caller(estimator, "predict", X)
277 if sample_weight is not None:
278 return self._sign * self._score_func(
279 y_true, y_pred, sample_weight=sample_weight, **self._kwargs
280 )
File /opt/conda/lib/python3.10/site-packages/sklearn/metrics/_scorer.py:73, in _cached_call(cache, estimator, method, *args, **kwargs)
71 """Call estimator with method and args and kwargs."""
72 if cache is None:
---> 73 return getattr(estimator, method)(*args, **kwargs)
75 try:
76 return cache[method]
File /opt/conda/lib/python3.10/site-packages/braindecode/classifier.py:198, in EEGClassifier.predict(self, X)
174 def predict(self, X):
175 """Return class labels for samples in X.
176
177 Parameters
(...)
196
197 """
--> 198 return self.predict_proba(X).argmax(1)
File /opt/conda/lib/python3.10/site-packages/braindecode/classifier.py:130, in EEGClassifier.predict_proba(self, X)
97 def predict_proba(self, X):
98 """Return the output of the module's forward method as a numpy
99 array. In case of cropped decoding returns averaged values for
100 each trial.
(...)
128
129 """
--> 130 y_pred = super().predict_proba(X)
131 # Normally, we have to average the predictions across crops/timesteps
132 # to get one prediction per window/trial
133 # Predictions may be already averaged in CroppedTrialEpochScoring (y_pred.shape==2).
134 # However, when predictions are computed outside of CroppedTrialEpochScoring
135 # we have to average predictions, hence the check if len(y_pred.shape) == 3
136 if self.cropped and self.aggregate_predictions and len(
137 y_pred.shape) == 3:
File /opt/conda/lib/python3.10/site-packages/skorch/classifier.py:200, in NeuralNetClassifier.predict_proba(self, X)
168 """Where applicable, return probability estimates for
169 samples.
170
(...)
196
197 """
198 # Only the docstring changed from parent.
199 # pylint: disable=useless-super-delegation
--> 200 return super().predict_proba(X)
File /opt/conda/lib/python3.10/site-packages/skorch/net.py:1599, in NeuralNet.predict_proba(self, X)
1597 yp = nonlin(yp)
1598 y_probas.append(to_numpy(yp))
-> 1599 y_proba = np.concatenate(y_probas, 0)
1600 return y_proba
ValueError: need at least one array to concatenate
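The last frame shows skorch's NeuralNet.predict_proba calling np.concatenate on y_probas, which is an empty list at that point, i.e. the forward iterator produced no batches for the data handed to the EpochScoring callback. NumPy raises exactly this error whenever it receives no arrays, as this minimal sketch reproduces:

import numpy as np

np.concatenate([], 0)  # ValueError: need at least one array to concatenate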
Environment
MOABB version: 1.0.0
Braindecode version: 0.8.1
Thanks for raising this issue. Could you share minimal code to reproduce it?
Thanks to @sylvchev for the quick reply. Certainly. The script below is just a small change to the moabb example.
import os

import torch
from absl.logging import ERROR, set_verbosity
from braindecode import EEGClassifier
from braindecode.models import EEGNetv4
from sklearn.pipeline import make_pipeline
from skorch.callbacks import EarlyStopping, EpochScoring
from skorch.dataset import ValidSplit

from moabb import set_log_level
from moabb.datasets import Shin2017A
from moabb.evaluations import CrossSessionEvaluation
from moabb.paradigms import LeftRightImagery
from moabb.utils import setup_seed
set_log_level("info")
# Suppress warning output
set_verbosity(ERROR)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Print PyTorch version information
print(f"Torch Version: {torch.__version__}")
# Use the GPU if one is available
cuda = torch.cuda.is_available()
device = "cuda" if cuda else "cpu"
# Set random seed to be able to reproduce results
seed = 42
setup_seed(seed)
# Restrict this example to the first two subjects of Shin2017A
dataset = Shin2017A(accept=True)
dataset.subject_list = dataset.subject_list[:2]
datasets = [dataset]
# Hyperparameters
LEARNING_RATE = 0.0625 * 0.01  # parameter taken from Braindecode
WEIGHT_DECAY = 0  # parameter taken from Braindecode
BATCH_SIZE = 64  # parameter taken from Braindecode
EPOCH = 10
PATIENCE = 3
# Band-pass and epoching parameters
# (note: defined here but never passed to LeftRightImagery below)
fmin = 4
fmax = 100
tmin = 0
tmax = None
clf = EEGClassifier(
module=EEGNetv4,
optimizer=torch.optim.Adam,
optimizer__lr=LEARNING_RATE,
optimizer__weight_decay=WEIGHT_DECAY,
batch_size=BATCH_SIZE,
max_epochs=EPOCH,
train_split=ValidSplit(0.2, random_state=seed),
device=device,
callbacks=[
EarlyStopping(monitor="valid_loss", patience=PATIENCE),
EpochScoring(
scoring="accuracy", on_train=True, name="train_acc", lower_is_better=False
),
EpochScoring(
scoring="accuracy", on_train=False, name="valid_acc", lower_is_better=False
),
],
verbose=1,  # print per-epoch results
)
# Create the pipelines
pipes = {}
pipes["EEGNetV4"] = make_pipeline(clf)
evaluation = CrossSessionEvaluation(
paradigm=LeftRightImagery(),
datasets=datasets,
suffix="braindecode_example",
overwrite=True,
return_epochs=True,
n_jobs=1,
)
results = evaluation.process(pipelines=pipes)
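If it helps narrow things down, my guess (an assumption, not a confirmed diagnosis) is that the scoring callback ends up with an empty dataset, e.g. because a session contributes very few trials and the ValidSplit(0.2) validation set comes out empty. A quick check with the standard paradigm.get_data API shows how many epochs are actually extracted per session for one subject:

from moabb.datasets import Shin2017A
from moabb.paradigms import LeftRightImagery

paradigm = LeftRightImagery()
# return_epochs=True matches the evaluation above; subjects=[1] keeps it fast
X, y, metadata = paradigm.get_data(
    Shin2017A(accept=True), subjects=[1], return_epochs=True
)
print(len(X))                              # total epochs for subject 1
print(metadata["session"].value_counts())  # epochs per session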