class BayesianRegression(PyroModule):
    """Bayesian neural-network regression model: 1 input -> 20 tanh units -> 1 output.

    Each weight/bias tensor is given a Normal prior via PyroSample, so the
    network's parameters are latent random variables rather than fixed
    point estimates.  Observation noise ``sigma`` is itself latent with a
    Uniform(0, 10) prior.
    """

    def __init__(self):
        super().__init__()
        # Hidden layer (1 -> 20).  Wrapping nn.Linear in PyroModule[...] lets
        # its parameters be replaced by priors below.
        self.linear1 = PyroModule[nn.Linear](1, 20)
        # Assigning a PyroSample turns the fixed parameter into a latent site.
        # .expand([20, 1]) matches the weight shape; .to_event(2) declares both
        # dims as event dims so the whole matrix is one multivariate sample.
        self.linear1.weight = PyroSample(dist.Normal(0., 1.).expand([20, 1]).to_event(2))
        self.linear1.bias = PyroSample(dist.Normal(0., 10.).expand([20]).to_event(1))
        # Output layer (20 -> 1), with priors set up the same way.
        self.linear2 = PyroModule[nn.Linear](20, 1)
        self.linear2.weight = PyroSample(dist.Normal(0., 1.).expand([1, 20]).to_event(2))
        self.linear2.bias = PyroSample(dist.Normal(0., 10.).expand([1]).to_event(1))

    def forward(self, x, y=None):
        """Model: sample noise scale, compute the network mean, observe y.

        Args:
            x: input features — presumably shape (N, 1) given the 1-d input
               layer; TODO confirm against the caller.
            y: observed targets, or None when sampling from the prior /
               predictive distribution.

        Returns:
            The predicted mean, with the trailing singleton dim squeezed off.
        """
        # Latent observation-noise scale, shared across all data points.
        sigma = pyro.sample("sigma", dist.Uniform(0., 10.))
        # Forward pass through the two layers with a tanh nonlinearity.
        mean = self.linear2(torch.tanh(self.linear1(x)))
        mean = mean.squeeze(-1)
        # Condition on the data: each point is conditionally independent
        # given the latent weights, hence the plate over the batch dim.
        with pyro.plate("data", x.shape[0]):
            obs = pyro.sample("obs", dist.Normal(mean, sigma), obs=y)
        return mean
# Instantiate the model and pair it with an automatic mean-field guide:
# AutoDiagonalNormal fits an independent (diagonal-covariance) Gaussian to
# every latent site, which ignores posterior correlations between weights.
model = BayesianRegression()
guide = AutoDiagonalNormal(model)
So I followed the Pyro tutorial and wanted to add some complexity to the model (on a one-dimensional dataset). I added a nonlinearity and a second layer, but when trying to train the model, I got worse results than with a simple linear model.
My question now is: am I implementing something wrong, or is AutoDiagonalNormal(model) the wrong guide for this example?