Hi, I’m using a model that only has an nnet predicting the mean of a distribution (and so I don’t have any pyro.sample()
statements in my model), and an AutoDiagonalNormal guide.
When I run the SVI I get the following error: `RuntimeError: AutoDiagonalNormal found no latent variables; Use an empty guide instead`.
Shouldn’t it be picking up my priors on the nnet weights?
And why are these priors correctly mapped by the autoguide in the Bayesian regression tutorial Part I? (There they also use an nnet and an autoguide — does the fact that they explicitly sample a stdev make the autoguide pick up the nnet weight priors?)
Thanks!
class OneLayerRegressionModel(nn.Module):
    """Single linear layer followed by a ReLU, producing one output per sample.

    The layer is stored as ``self.linear_1``; its parameter names are therefore
    ``linear_1.weight`` and ``linear_1.bias`` in ``named_parameters()``.
    """

    def __init__(self, n_in):
        # n_in = number of input features; the net emits a single value.
        super().__init__()
        self.linear_1 = nn.Linear(n_in, 1)

    def forward(self, x):
        # relu(x @ w.T + b): non-negative prediction of shape (batch, 1)
        linear_out = self.linear_1(x)
        return torch.relu(linear_out)
def model(dataset_total_length, x_data, y_data):
    """Bayesian model: lift the nnet's weights to latent variables and observe y.

    Args:
        dataset_total_length: total dataset size, used by the subsampling plate.
        x_data: minibatch of inputs (assumes shape (batch, n_features) — confirm).
        y_data: observed targets for the minibatch, or None for prediction.

    Returns:
        The (sampled-weights) predicted mean for the minibatch.
    """
    # Priors over the network parameters.
    w_prior = Normal(torch.zeros(1, n_features), torch.ones(1, n_features)).to_event(1)
    b_prior = Normal(torch.tensor([[0.]]), torch.tensor([[1.]])).to_event(1)
    # BUG FIX: the dict keys must exactly match the module's parameter names as
    # reported by regression_model.named_parameters(). The layer attribute is
    # `self.linear_1`, so the names are 'linear_1.weight' / 'linear_1.bias'.
    # With the previous keys ('linear.weight' / 'linear.bias') nothing matched,
    # pyro.random_module lifted no parameters, the model contained no latent
    # sample sites, and AutoDiagonalNormal raised
    # "found no latent variables; Use an empty guide instead".
    priors = {'linear_1.weight': w_prior, 'linear_1.bias': b_prior}
    # Replace the fixed nn parameters with samples from the priors.
    lifted_module = pyro.random_module('module', regression_model, priors)
    lifted_module_sample = lifted_module()
    with pyro.plate('map', dataset_total_length, subsample=x_data):
        prediction_mean = lifted_module_sample(x_data).squeeze(-1)
        # NOTE(review): Exponential's rate must be strictly positive, but the
        # net's relu output can be exactly 0 — consider softplus or a small
        # epsilon; left unchanged here to preserve the model's definition.
        pyro.sample('observations', Exponential(prediction_mean), obs=y_data)
        return prediction_mean
# Instantiate the net the model() closure lifts; assumes `n_features` is
# defined at module level — TODO confirm it is set before this line runs.
regression_model = OneLayerRegressionModel(n_features)
# Autoguide: builds one independent Normal per latent sample site found by
# tracing `model` (it only sees sites created via pyro.sample/random_module).
guide = AutoDiagonalNormal(model)