[Bug] test_t_matmul_matrix fails
🐛 Bug
Three `test_t_matmul_matrix` variants in `test/lazy/test_lazy_evaluated_kernel_tensor.py` (`TestLazyEvaluatedKernelTensorBatch`, `TestLazyEvaluatedKernelTensorMultitaskBatch`, and `TestLazyEvaluatedKernelTensorAdditive`) fail with:

```
RuntimeError: Should not have ended up in LazyEvaluatedKernelTensor._matmul without kernel checkpointing. This is probably a bug in GPyTorch.
```
To reproduce
Run the test suite, e.g. `pytest test/lazy/test_lazy_evaluated_kernel_tensor.py -k test_t_matmul_matrix`.
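Outside the test suite, the guard should also be reachable directly; the following is a minimal sketch based on the tracebacks below (`RBFKernel` and the shapes are illustrative assumptions, not taken from the failing tests):

```python
import torch
from gpytorch.kernels import RBFKernel

# Calling a kernel on inputs returns a LazyEvaluatedKernelTensor
# (kernels are lazily evaluated by default).
kernel = RBFKernel()
x = torch.randn(5, 3)
lazy_covar = kernel(x, x)

# linear_operator 0.5.1 implements _t_matmul as self.mT._matmul(rhs)
# (see the traces below); LazyEvaluatedKernelTensor._matmul only
# supports the kernel-checkpointing path, so this should raise the
# RuntimeError reported above.
rhs = torch.randn(5, 4)
lazy_covar._t_matmul(rhs)
```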
**Stack trace/error message**
```
___________ TestLazyEvaluatedKernelTensorBatch.test_t_matmul_matrix ____________

self = <test.lazy.test_lazy_evaluated_kernel_tensor.TestLazyEvaluatedKernelTensorBatch testMethod=test_t_matmul_matrix>

    def test_t_matmul_matrix(self):
        with torch.no_grad():
            linear_op = self.create_linear_op()
            rhs = torch.randn(*linear_op.batch_shape, linear_op.size(-2), 4)
            linear_op_copy = torch.clone(linear_op)
            evaluated = self.evaluate_linear_op(linear_op_copy)
            rhs_evaluated = to_dense(rhs)

            # Test operator
>           res = linear_op._t_matmul(rhs)

/nix/store/ghl89pdl4wyqifz498kzz9n20pph3jsh-python3.10-linear_operator-0.5.1/lib/python3.10/site-packages/linear_operator/test/linear_operator_test_case.py:403:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

/nix/store/ghl89pdl4wyqifz498kzz9n20pph3jsh-python3.10-linear_operator-0.5.1/lib/python3.10/site-packages/linear_operator/operators/_linear_operator.py:914: in _t_matmul
    return self.mT._matmul(rhs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <gpytorch.lazy.lazy_evaluated_kernel_tensor.LazyEvaluatedKernelTensor object at 0x19da25180>
rhs = tensor([[[-1.1256e+00, -3.1700e-01, -1.0925e+00, -8.5194e-02],
         [-9.3348e-02,  6.8705e-01, -8.3832e-01,  8.918... [ 1.2390e-01,  1.1648e+00,  9.2337e-01,  1.3873e+00],
         [ 1.3750e+00,  6.5963e-01,  4.7656e-01, -1.0163e+00]]])

    def _matmul(self, rhs):
        # This _matmul is defined computes the kernel in chunks
        # It is only used when we are using kernel checkpointing
        # It won't be called if checkpointing is off
        x1 = self.x1
        x2 = self.x2
        split_size = beta_features.checkpoint_kernel.value()
        if not split_size:
>           raise RuntimeError(
                "Should not have ended up in LazyEvaluatedKernelTensor._matmul without kernel checkpointing. "
                "This is probably a bug in GPyTorch."
            )
E           RuntimeError: Should not have ended up in LazyEvaluatedKernelTensor._matmul without kernel checkpointing. This is probably a bug in GPyTorch.

gpytorch/lazy/lazy_evaluated_kernel_tensor.py:254: RuntimeError

_______ TestLazyEvaluatedKernelTensorMultitaskBatch.test_t_matmul_matrix _______

self = <test.lazy.test_lazy_evaluated_kernel_tensor.TestLazyEvaluatedKernelTensorMultitaskBatch testMethod=test_t_matmul_matrix>

    def test_t_matmul_matrix(self):
        with torch.no_grad():
            linear_op = self.create_linear_op()
            rhs = torch.randn(*linear_op.batch_shape, linear_op.size(-2), 4)
            linear_op_copy = torch.clone(linear_op)
            evaluated = self.evaluate_linear_op(linear_op_copy)
            rhs_evaluated = to_dense(rhs)

            # Test operator
>           res = linear_op._t_matmul(rhs)

/nix/store/ghl89pdl4wyqifz498kzz9n20pph3jsh-python3.10-linear_operator-0.5.1/lib/python3.10/site-packages/linear_operator/test/linear_operator_test_case.py:403:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

/nix/store/ghl89pdl4wyqifz498kzz9n20pph3jsh-python3.10-linear_operator-0.5.1/lib/python3.10/site-packages/linear_operator/operators/_linear_operator.py:914: in _t_matmul
    return self.mT._matmul(rhs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <gpytorch.lazy.lazy_evaluated_kernel_tensor.LazyEvaluatedKernelTensor object at 0x19de19870>
rhs = tensor([[[ 1.6459, -1.3602,  0.3446,  0.5199],
         [-2.6133, -1.6965, -0.2282,  0.2800],
         [ 0.2469,  0.07...891, -1.5100,  1.0241],
         [ 0.1954, -0.7371,  1.7001,  0.3462],
         [ 0.9711,  1.4503, -0.0519, -0.6284]]])

    def _matmul(self, rhs):
        # This _matmul is defined computes the kernel in chunks
        # It is only used when we are using kernel checkpointing
        # It won't be called if checkpointing is off
        x1 = self.x1
        x2 = self.x2
        split_size = beta_features.checkpoint_kernel.value()
        if not split_size:
>           raise RuntimeError(
                "Should not have ended up in LazyEvaluatedKernelTensor._matmul without kernel checkpointing. "
                "This is probably a bug in GPyTorch."
            )
E           RuntimeError: Should not have ended up in LazyEvaluatedKernelTensor._matmul without kernel checkpointing. This is probably a bug in GPyTorch.

gpytorch/lazy/lazy_evaluated_kernel_tensor.py:254: RuntimeError

__________ TestLazyEvaluatedKernelTensorAdditive.test_t_matmul_matrix __________

self = <test.lazy.test_lazy_evaluated_kernel_tensor.TestLazyEvaluatedKernelTensorAdditive testMethod=test_t_matmul_matrix>

    def test_t_matmul_matrix(self):
        with torch.no_grad():
            linear_op = self.create_linear_op()
            rhs = torch.randn(*linear_op.batch_shape, linear_op.size(-2), 4)
            linear_op_copy = torch.clone(linear_op)
            evaluated = self.evaluate_linear_op(linear_op_copy)
            rhs_evaluated = to_dense(rhs)

            # Test operator
>           res = linear_op._t_matmul(rhs)

/nix/store/ghl89pdl4wyqifz498kzz9n20pph3jsh-python3.10-linear_operator-0.5.1/lib/python3.10/site-packages/linear_operator/test/linear_operator_test_case.py:403:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

/nix/store/ghl89pdl4wyqifz498kzz9n20pph3jsh-python3.10-linear_operator-0.5.1/lib/python3.10/site-packages/linear_operator/operators/_linear_operator.py:914: in _t_matmul
    return self.mT._matmul(rhs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <gpytorch.lazy.lazy_evaluated_kernel_tensor.LazyEvaluatedKernelTensor object at 0x19d962410>
rhs = tensor([[ 0.9383,  0.4889, -0.6731,  0.8728],
        [ 1.1108,  1.2899, -1.4782,  2.5672],
        [-0.4731,  0.3356,  1.5091,  2.0820],
        [-0.4798, -0.4997, -1.0670,  1.1149],
        [-0.1407,  0.8058,  0.3276, -0.7607]])

    def _matmul(self, rhs):
        # This _matmul is defined computes the kernel in chunks
        # It is only used when we are using kernel checkpointing
        # It won't be called if checkpointing is off
        x1 = self.x1
        x2 = self.x2
        split_size = beta_features.checkpoint_kernel.value()
        if not split_size:
>           raise RuntimeError(
                "Should not have ended up in LazyEvaluatedKernelTensor._matmul without kernel checkpointing. "
                "This is probably a bug in GPyTorch."
            )
E           RuntimeError: Should not have ended up in LazyEvaluatedKernelTensor._matmul without kernel checkpointing. This is probably a bug in GPyTorch.

gpytorch/lazy/lazy_evaluated_kernel_tensor.py:254: RuntimeError
```
Expected Behavior
The tests pass.
System information
Please complete the following information:
- gpytorch 1.11
- linear_operator 0.5.1
- torch 2.0.1
- installed via nixpkgs
Additional context
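All three failures take the same path: the generic `LinearOperator._t_matmul` in linear_operator 0.5.1 delegates to `self.mT._matmul(rhs)`, and `LazyEvaluatedKernelTensor._matmul` is only implemented for the kernel-checkpointing case, so the guard fires whenever checkpointing is off. It looks like `LazyEvaluatedKernelTensor` needs its own `_t_matmul` (or its transpose needs to evaluate) rather than falling through to the base class. As a sanity check rather than a fix, the same call under the checkpointing beta feature should avoid the guard; a minimal sketch, with `RBFKernel`, the shapes, and the split size as illustrative assumptions:

```python
import torch
import gpytorch
from gpytorch.kernels import RBFKernel

kernel = RBFKernel()
x = torch.randn(5, 3)
rhs = torch.randn(5, 4)

# With a nonzero split size, LazyEvaluatedKernelTensor._matmul takes
# its chunked code path instead of hitting the guard; the split size
# of 1000 is an arbitrary choice for illustration.
with gpytorch.beta_features.checkpoint_kernel(1000):
    res = kernel(x, x)._t_matmul(rhs)
```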