gpytorch
gpytorch copied to clipboard
[Docs] composite kernel usage
I working on modeling a dataset in which the part of the input data has a grid structure. To model this problem using GP regression, I am trying to use a composite kernel involving the product of GridInterpolationKernel and SM kernel (with ard_num_dims = 2). Using gpytorch for this problem, I am getting the following error.
raise RuntimeError( RuntimeError: Expected the input to have 1 dimensionality (based on the ard_num_dims argument). Got 2.
I suspect I am not passing the training inputs as expected by the gpytorch ExactGP implementation. It would be helpful if someone can point me to the mistake in my code or refer to an example code with a product kernel with multi-dimensional inputs.
Below sample code should reproduce the above-mentioned error.
import math
import torch
import gpytorch
def create_grid_ND(grid_list):
"""
This function creates grid from list of sub-grids, each sub-grid size is M_ixd_i
"""
idx_list = [torch.arange(0, len(grid_i)) for grid_i in grid_list ]
idx_grid = torch.meshgrid(*idx_list, indexing='ij')
expand_grid_list = [grid_list[i][idx_grid[i].reshape(-1)] for i in range(len(grid_list))]
return torch.cat(expand_grid_list, axis=1)
grid_size = 40
x_0 = torch.linspace(0,1,grid_size).reshape(-1, 1)
x_1 = torch.linspace(0,1,grid_size).reshape(-1, 1)
x_2_3 = torch.rand(10, 2)
# create inputs
train_x = create_grid_ND([x_0, x_1, x_2_3])
spatial_grid = create_grid_ND([x_0, x_1])
# True function is x0 * sin( 2*pi*(x1)) * cos(2*pi*(x2 + x3))
train_y = train_x[:, 0] * torch.sin(train_x[:, 1] * (2 * math.pi)) * torch.cos((train_x[:, 2] + train_x[:, 3]) * (2 * math.pi))
#
class CustomSpectralMixtureGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood, grid, grid_size, num_mixtures=4):
super(CustomSpectralMixtureGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
# self.spatial_covar_01 = gpytorch.kernels.GridKernel(
# gpytorch.kernels.SpectralMixtureKernel(num_mixtures=num_mixtures), grid=grid)
self.spatial_covar_01 = gpytorch.kernels.GridInterpolationKernel(
gpytorch.kernels.SpectralMixtureKernel(num_mixtures=num_mixtures), grid_size=grid_size,
grid_bounds = [(0, 1), (0, 1)])
self.spatial_covar_23 = gpytorch.kernels.SpectralMixtureKernel(num_mixtures=num_mixtures, ard_num_dims=2)
# k = k_x0 * k_x1 * k_x23
# K = K_x0 \kron K_x1 \kron K_x23
self.covar_module = gpytorch.kernels.ScaleKernel(self.spatial_covar_01 * self.spatial_covar_23)
# self.covar_module.initialize_from_data(train_x, train_y)
def forward(self,x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = CustomSpectralMixtureGPModel(train_x, train_y, likelihood, spatial_grid, grid_size,
num_mixtures = 4)
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for epoch in range(50):
optimizer.zero_grad()
mod_outputs = model(train_x)
# loss
loss = -mll(mod_outputs, train_y)
loss.backward()
optimizer.step()
full error
Traceback (most recent call last):
File "/home/code/test/grid_gpr_test.py", line 59, in <module>
loss = -mll(mod_outputs, train_y)
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/module.py", line 30, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py", line 62, in forward
res = output.log_prob(target)
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/distributions/multivariate_normal.py", line 168, in log_prob
covar = covar.evaluate_kernel()
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/lazy/added_diag_lazy_tensor.py", line 187, in evaluate_kernel
added_diag_lazy_tsr = self.representation_tree()(*self.representation())
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/lazy/lazy_tensor.py", line 1568, in representation_tree
return LazyTensorRepresentationTree(self)
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/lazy/lazy_tensor_representation_tree.py", line 13, in __init__
representation_size = len(arg.representation())
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 377, in representation
return self.evaluate_kernel().representation()
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/utils/memoize.py", line 59, in g
return _add_to_cache(self, cache_name, method(self, *args, **kwargs), *args, kwargs_pkl=kwargs_pkl)
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py", line 332, in evaluate_kernel
res = self.kernel(
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/kernels/kernel.py", line 402, in __call__
res = lazify(super(Kernel, self).__call__(x1_, x2_, last_dim_is_batch=last_dim_is_batch, **params))
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/module.py", line 30, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/kernels/scale_kernel.py", line 103, in forward
orig_output = self.base_kernel.forward(x1, x2, diag=diag, last_dim_is_batch=last_dim_is_batch, **params)
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/kernels/kernel.py", line 516, in forward
res = self.kernels[0](x1, x2, diag=diag, **params)
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/kernels/kernel.py", line 402, in __call__
res = lazify(super(Kernel, self).__call__(x1_, x2_, last_dim_is_batch=last_dim_is_batch, **params))
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/module.py", line 30, in __call__
outputs = self.forward(*inputs, **kwargs)
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/kernels/grid_interpolation_kernel.py", line 183, in forward
base_lazy_tsr = lazify(self._inducing_forward(last_dim_is_batch=last_dim_is_batch, **params))
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/kernels/grid_interpolation_kernel.py", line 146, in _inducing_forward
return super().forward(self.grid, self.grid, last_dim_is_batch=last_dim_is_batch, **params)
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/kernels/grid_kernel.py", line 141, in forward
covars = delazify(self.base_kernel(first_grid_point, full_grid, last_dim_is_batch=True, **params))
File "/home/dama/miniconda3/envs/gpm/lib/python3.8/site-packages/gpytorch/kernels/kernel.py", line 384, in __call__
raise RuntimeError(
RuntimeError: Expected the input to have 1 dimensionality (based on the ard_num_dims argument). Got 2.