I tried to implement GaussianRandomWalk but still cannot run MCMC when the data length exceeds 200
(fewer than 200 points works fine).
Updated: I changed the random seed and used a very small step_size, and was then able to run MCMC on the full data. I tried to debug why it fails for some seeds and observed that the gradients blow up in those cases. I don't know how to deal with this issue.
class GaussianRandomWalk(dist.TorchDistribution):
    """Gaussian random walk: ``x[t] = x[t-1] + scale * eps_t`` with ``eps_t ~ N(0, 1)``
    and ``x[-1] = 0``, so every increment (including the first step from 0)
    is ``Normal(previous, scale)``.

    :param torch.Tensor scale: step standard deviation; its shape becomes the
        distribution's batch shape.
    :param int num_steps: length of the walk (the event shape is ``[num_steps]``).
    """
    has_rsample = True
    arg_constraints = {'scale': constraints.positive}
    support = constraints.real

    def __init__(self, scale, num_steps=1):
        self.scale = scale
        batch_shape, event_shape = scale.shape, torch.Size([num_steps])
        super().__init__(batch_shape, event_shape)

    def rsample(self, sample_shape=torch.Size()):
        """Reparameterized sample: cumulative sum of iid standard normals, scaled."""
        shape = sample_shape + self.batch_shape + self.event_shape
        walks = self.scale.new_empty(shape).normal_()
        # unsqueeze so a batched scale broadcasts over the time (event) axis
        return walks.cumsum(-1) * self.scale.unsqueeze(-1)

    def log_prob(self, x):
        """Sum of per-step Gaussian increment log-densities along the time axis."""
        # First step is measured from the implicit origin x[-1] = 0.
        init_prob = dist.Normal(self.scale.new_tensor(0.), self.scale).log_prob(x[..., 0])
        # BUG FIX: scale must be unsqueezed (as in rsample) so a batched scale
        # broadcasts against the (..., num_steps - 1) increment locations;
        # the original used self.scale directly, which breaks for non-scalar scale.
        step_probs = dist.Normal(x[..., :-1], self.scale.unsqueeze(-1)).log_prob(x[..., 1:])
        return init_prob + step_probs.sum(-1)
def model(data):
    """Stochastic volatility model: log-volatility ``h`` follows a Gaussian
    random walk with step scale ``sigma``; observed returns are Student-T
    distributed with ``exp(h)`` scale and ``nu`` degrees of freedom.

    :param data: 1-D sequence of observed returns (tensor or array-like).
    :return: the observed-returns sample site value.
    """
    # as_tensor avoids a fresh copy (and the UserWarning) when data is
    # already a tensor; torch.tensor(data) always copied.
    data = torch.as_tensor(data)
    T = len(data)
    sigma = pyro.sample('sigma', dist.Exponential(50.))
    nu = pyro.sample('nu', dist.Exponential(0.1))
    h = pyro.sample("h", GaussianRandomWalk(scale=sigma, num_steps=T))
    # .to_event(1) is the current name for the deprecated .independent(1):
    # treat the T return dimensions as one event.
    y = pyro.sample("returns",
                    dist.StudentT(df=nu, loc=torch.zeros(T),
                                  scale=torch.exp(h)).to_event(1),
                    obs=data)
    return y
# Run NUTS on the stochastic volatility model.
# Double precision helps with the ill-conditioned gradients reported above.
torch.set_default_dtype(torch.float64)
pyro.enable_validation()
pyro.set_rng_seed(2)
pyro.clear_param_store()
# NOTE(review): step-size adaptation is disabled and a tiny fixed step is used
# to work around the gradient blow-ups described above — this likely masks the
# numerical issue rather than fixing it; confirm before relying on the samples.
nuts_kernel = NUTS(model, step_size=0.00001, adapt_step_size=False)
# num_samples=1 / warmup_steps=2 are debug-scale settings, not a real run.
# `data` is assumed to be defined elsewhere in the surrounding session.
mcmc_run = MCMC(nuts_kernel, num_samples=1, warmup_steps=2).run(data)