`ValueError: can't optimize a non-leaf Tensor` on sysid for the pendulum experiments
It seems that one of the tensors passed to RMSprop is not a leaf tensor (it was created through operations that were recorded in the autograd graph).
To reproduce, just execute `python imitation_nonconvex/il_exp.py --mode sysid`.
I am using PyTorch 1.13.1.
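For context, the parameter shown in the traceback below carries `grad_fn=<ToCopyBackward0>`, which suggests `self.env_params` is created on CPU with `requires_grad=True` and only afterwards moved to CUDA with `.to(device)`; that copy is no longer a leaf. A minimal standalone sketch (assuming a CUDA device is available, as in the traceback) hits the same ValueError:

```python
import torch
from torch import optim

# A proper leaf tensor, created on CPU with requires_grad=True.
p = torch.tensor([15.0, 3.0, 0.5], requires_grad=True)

# Moving it afterwards goes through autograd (ToCopyBackward0),
# so the CUDA copy is not a leaf tensor anymore.
p_cuda = p.to('cuda')
print(p_cuda.is_leaf)  # False

# Same failure as in il_exp.py:
# ValueError: can't optimize a non-leaf Tensor
opt = optim.RMSprop([{'params': [p_cuda], 'lr': 1e-2, 'alpha': 0.5}])
```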
Error:
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
File /mnt/hdd/Desktop/differentiable-mpc/imitation_nonconvex/il_exp.py:506
502 return loss
505 if __name__ == "__main__":
--> 506 main()
File /mnt/hdd/Desktop/differentiable-mpc/imitation_nonconvex/il_exp.py:64, in main()
61 args.learn_dx = True
63 exp = IL_Exp(**vars(args))
---> 64 exp.run()
exp = <__main__.IL_Exp object at 0x7f376769b0d0>
File /mnt/hdd/Desktop/differentiable-mpc/imitation_nonconvex/il_exp.py:260, in IL_Exp.run(self=<__main__.IL_Exp object>)
249 params = [{
250 'params': self.env_params,
251 'lr': 1e-2,
252 'alpha': 0.5,
253 }]
255 # if self.env_name == 'pendulum-complex':
256 # params.append({
257 # 'params': self.extra_dx.parameters(),
258 # 'lr': 1e-4,
259 # })
--> 260 opt = optim.RMSprop(params)
params = [{'params': [tensor([15.0000, 3.0000, 0.5000], device='cuda:0', grad_fn=<ToCopyBackward0>)], 'lr': 0.01, 'alpha': 0.5}]
optim = <module 'torch.optim' from '/home/brunompacheco/miniconda3/envs/mpc-diff-pytorch/lib/python3.10/site-packages/torch/optim/__init__.py'>
261 else:
262 assert False
File ~/miniconda3/envs/mpc-diff-pytorch/lib/python3.10/site-packages/torch/optim/rmsprop.py:87, in RMSprop.__init__(self=RMSprop (), params=[{'alpha': 0.5, 'lr': 0.01, 'params': [tensor([15.0000, 3.0000, 0.5000], device='cuda:0', grad_fn=<ToCopyBackward0>)]}], lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False, foreach=None, maximize=False, differentiable=False)
82 raise ValueError("Invalid alpha value: {}".format(alpha))
84 defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered,
85 weight_decay=weight_decay, foreach=foreach, maximize=maximize,
86 differentiable=differentiable)
---> 87 super(RMSprop, self).__init__(params, defaults)
defaults = {'lr': 0.01, 'momentum': 0, 'alpha': 0.99, 'eps': 1e-08, 'centered': False, 'weight_decay': 0, 'foreach': None, 'maximize': False, 'differentiable': False}
params = [{'params': [tensor([15.0000, 3.0000, 0.5000], device='cuda:0', grad_fn=<ToCopyBackward0>)], 'lr': 0.01, 'alpha': 0.5}]
self = RMSprop ()
File ~/miniconda3/envs/mpc-diff-pytorch/lib/python3.10/site-packages/torch/optim/optimizer.py:66, in Optimizer.__init__(self=RMSprop (), params=[{'alpha': 0.5, 'lr': 0.01, 'params': [tensor([15.0000, 3.0000, 0.5000], device='cuda:0', grad_fn=<ToCopyBackward0>)]}], defaults={'alpha': 0.99, 'centered': False, 'differentiable': False, 'eps': 1e-08, 'foreach': None, 'lr': 0.01, 'maximize': False, 'momentum': 0, 'weight_decay': 0})
63 param_groups = [{'params': param_groups}]
65 for param_group in param_groups:
---> 66 self.add_param_group(param_group)
param_group = {'params': [tensor([15.0000, 3.0000, 0.5000], device='cuda:0', grad_fn=<ToCopyBackward0>)], 'lr': 0.01, 'alpha': 0.5}
self = RMSprop ()
68 # Allows _cuda_graph_capture_health_check to rig a poor man's TORCH_WARN_ONCE in python,
69 # which I don't think exists
70 # https://github.com/pytorch/pytorch/issues/72948
71 self._warned_capturable_if_run_uncaptured = True
File ~/miniconda3/envs/mpc-diff-pytorch/lib/python3.10/site-packages/torch/optim/optimizer.py:326, in Optimizer.add_param_group(self=RMSprop (), param_group={'alpha': 0.5, 'lr': 0.01, 'params': [tensor([15.0000, 3.0000, 0.5000], device='cuda:0', grad_fn=<ToCopyBackward0>)]})
323 raise TypeError("optimizer can only optimize Tensors, "
324 "but one of the params is " + torch.typename(param))
325 if not self.defaults.get('differentiable', None) and not (param.is_leaf or param.retains_grad):
--> 326 raise ValueError("can't optimize a non-leaf Tensor")
328 for name, default in self.defaults.items():
329 if default is required and name not in param_group:
ValueError: can't optimize a non-leaf Tensor
> /home/brunompacheco/miniconda3/envs/mpc-diff-pytorch/lib/python3.10/site-packages/torch/optim/optimizer.py(326)add_param_group()
324 "but one of the params is " + torch.typename(param))
325 if not self.defaults.get('differentiable', None) and not (param.is_leaf or param.retains_grad):
--> 326 raise ValueError("can't optimize a non-leaf Tensor")
327
    328         for name, default in self.defaults.items():
```
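I am not sure exactly where `self.env_params` is constructed, but assuming it follows the pattern above (built on CPU, then moved with `.to(device)`), either of the following should hand RMSprop a proper leaf tensor (the `device` name and the literal values here are just illustrative):

```python
# Option 1: detach the moved copy and mark it as a leaf that requires grad.
env_params = env_params.to(device).detach().requires_grad_()

# Option 2: create the parameter tensor directly on the target device.
env_params = torch.tensor([15.0, 3.0, 0.5], device=device, requires_grad=True)
```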