
Cannot run inference.ipynb in the example

Open dengpingsong opened this issue 4 months ago • 8 comments

When running cell 5 of the inference.ipynb example, line 7 of the cell, `loader = _data_loader.create_data_loader(config, num_batches=1, skip_norm_stats=True)`, raises a TypeError. It looks like the error might be caused by an installed site package (the traceback ends inside the installed lerobot package, not openpi).
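For reference, here is a minimal reconstruction of the failing cell from the traceback. The config name and the `get_config` lookup are assumptions on my part; the notebook may obtain `config` differently.

```python
import dataclasses

from openpi.training import config as _config
from openpi.training import data_loader as _data_loader

# Assumption: the notebook loads a released train config by name.
config = _config.get_config("pi0_aloha_sim")
config = dataclasses.replace(config, batch_size=2)

# Load a single batch of data. This is the same data that will be used during training.
# NOTE: normalization is skipped so the example stays self-contained, since the stats
# would otherwise have to be generated with compute_norm_stats.
loader = _data_loader.create_data_loader(config, num_batches=1, skip_norm_stats=True)
obs, act = next(iter(loader))
```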

TypeError: stack(): argument 'tensors' (position 1) must be tuple of Tensors, not Column

Full traceback:

```
TypeError                                 Traceback (most recent call last)
Cell In[5], line 7
      2 config = dataclasses.replace(config, batch_size=2)
      4 # Load a single batch of data. This is the same data that will be used during training.
      5 # NOTE: In order to make this example self-contained, we are skipping the normalization step
      6 # since it requires the normalization statistics to be generated using compute_norm_stats.
----> 7 loader = _data_loader.create_data_loader(config, num_batches=1, skip_norm_stats=True)
      8 obs, act = next(iter(loader))
     10 # If obs or act is a Column, extract the values

File ~/openpi/src/openpi/training/data_loader.py:242, in create_data_loader(config, sharding, shuffle, num_batches, skip_norm_stats)
    232 if data_config.rlds_data_dir is not None:
    233     return create_rlds_data_loader(
    234         data_config,
    235         action_horizon=config.model.action_horizon,
   (...)
    240         skip_norm_stats=skip_norm_stats,
    241     )
--> 242 return create_torch_data_loader(
    243     data_config,
    244     model_config=config.model,
    245     action_horizon=config.model.action_horizon,
    246     batch_size=config.batch_size,
    247     sharding=sharding,
    248     shuffle=shuffle,
    249     num_batches=num_batches,
    250     num_workers=config.num_workers,
    251     seed=config.seed,
    252     skip_norm_stats=skip_norm_stats,
    253 )

File ~/openpi/src/openpi/training/data_loader.py:286, in create_torch_data_loader(data_config, model_config, action_horizon, batch_size, sharding, skip_norm_stats, shuffle, num_batches, num_workers, seed)
    256 def create_torch_data_loader(
    257     data_config: _config.DataConfig,
    258     model_config: _model.BaseModelConfig,
   (...)
    267     seed: int = 0,
    268 ) -> DataLoader[tuple[_model.Observation, _model.Actions]]:
    269     """Create a data loader for training.
    270
    271     Args:
   (...)
    284         seed: The seed to use for shuffling the data.
    285     """
--> 286     dataset = create_torch_dataset(data_config, action_horizon, model_config)
    287     dataset = transform_dataset(dataset, data_config, skip_norm_stats=skip_norm_stats)
    289     data_loader = TorchDataLoader(
    290         dataset,
    291         local_batch_size=batch_size // jax.process_count(),
   (...)
    296         seed=seed,
    297     )

File ~/openpi/src/openpi/training/data_loader.py:140, in create_torch_dataset(data_config, action_horizon, model_config)
    137     return FakeDataset(model_config, num_samples=1024)
    139 dataset_meta = lerobot_dataset.LeRobotDatasetMetadata(repo_id)
--> 140 dataset = lerobot_dataset.LeRobotDataset(
    141     data_config.repo_id,
    142     delta_timestamps={
    143         key: [t / dataset_meta.fps for t in range(action_horizon)] for key in data_config.action_sequence_keys
    144     },
    145 )
    147 if data_config.prompt_from_task:
    148     dataset = TransformedDataset(dataset, [_transforms.PromptFromLeRobotTask(dataset_meta.tasks)])

File ~/miniconda3/envs/pi0/lib/python3.11/site-packages/lerobot/common/datasets/lerobot_dataset.py:508, in LeRobotDataset.__init__(self, repo_id, root, episodes, image_transforms, delta_timestamps, tolerance_s, revision, force_cache_sync, download_videos, video_backend)
    505 self.episode_data_index = get_episode_data_index(self.meta.episodes, self.episodes)
    507 # Check timestamps
--> 508 timestamps = torch.stack(self.hf_dataset["timestamp"]).numpy()
    509 episode_indices = torch.stack(self.hf_dataset["episode_index"]).numpy()
    510 ep_data_index_np = {k: t.numpy() for k, t in self.episode_data_index.items()}

TypeError: stack(): argument 'tensors' (position 1) must be tuple of Tensors, not Column
```
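The last frame is inside lerobot, not openpi: `LeRobotDataset.__init__` calls `torch.stack(self.hf_dataset["timestamp"])`, but the installed Hugging Face `datasets` package appears to return a lazy `Column` object for column access instead of a list of tensors, which `torch.stack` cannot consume. Below is a sketch of the mismatch and one possible local workaround (assumptions: `with_format("torch")` mirrors what lerobot sets up, and iterating the `Column` yields per-row tensors); the cleaner fix is probably to pin `datasets` to the version that lerobot declares in its dependencies rather than patching site-packages.

```python
import torch
from datasets import Dataset

# Minimal illustration of the mismatch (assumption: this mirrors what lerobot does internally).
ds = Dataset.from_dict({"timestamp": [0.0, 0.1, 0.2]}).with_format("torch")

col = ds["timestamp"]             # on recent `datasets` releases this can be a lazy Column,
                                  # not a list of Tensors, so torch.stack(col) raises the
                                  # TypeError shown above
stacked = torch.stack(list(col))  # materializing the column into a plain list works around it
print(stacked)                    # a 1-D tensor containing the three timestamps
```

If patching instead, the equivalent change would go around the `# Check timestamps` block at lerobot_dataset.py:508 shown in the traceback, but editing an installed site package is fragile, so aligning the `datasets` version seems like the better route.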

dengpingsong · Jul 11 '25 03:07