Tensor sizing error in NN

I have the following problem; I hope somebody can help.

My problem is a tensor size mismatch. My tensors are:

y_data                   torch.Size([34, 36])
x_data                   torch.Size([34, 2])
first_layer = x_data.shape[1]
second_layer = first_layer * 6

import torch
import pyro
from torch.autograd import Variable
from pyro.distributions import Normal
from pyro.optim import Adam
from pyro.infer import SVI, Trace_ELBO

class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)   # hidden layer
        self.predict = torch.nn.Linear(n_hidden, 36)   # output layer
        #self.relu(n_hidden)
    def forward(self, x):
        x = torch.tanh(self.hidden(x))
        x = self.predict(x)
        return x
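
The plain network on its own already produces the shape I expect; a quick check (a throwaway instance, just for illustration):

# sanity check: the deterministic net maps (34, 2) -> (34, 36)
net = Net(first_layer, second_layer)
print(net(x_data).shape)  # torch.Size([34, 36]): hidden (2 -> 12), predict (12 -> 36)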

regression_model = Net(first_layer, second_layer)   # the instance that gets lifted below

def model(x_data, y_data):
    # weight and bias priors
    mu = Variable(torch.zeros(second_layer, first_layer)).type_as(x_data)
    sigma = Variable(torch.ones(second_layer, first_layer)).type_as(x_data)
    bias_mu = Variable(torch.zeros(second_layer)).type_as(x_data)
    bias_sigma = Variable(torch.ones(second_layer)).type_as(x_data)
    w_prior, b_prior = Normal(mu, sigma), Normal(bias_mu, bias_sigma)

    mu2 = Variable(torch.zeros(1, second_layer)).type_as(x_data)
    sigma2 = Variable(torch.ones(1, second_layer)).type_as(x_data)
    bias_mu2 = Variable(torch.zeros(1)).type_as(x_data)
    bias_sigma2 = Variable(torch.ones(1)).type_as(x_data)
    w_prior2, b_prior2 = Normal(mu2, sigma2), Normal(bias_mu2, bias_sigma2)

    priors = {'hidden.weight': w_prior,
              'hidden.bias': b_prior,
              'predict.weight': w_prior2,
              'predict.bias': b_prior2}
    scale = Variable(torch.ones(x_data.size(0))).type_as(x_data)
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", regression_model, priors)
    # sample a nn (which also samples w and b)
    lifted_reg_model = lifted_module()
    with pyro.plate("map", len(x_data)):
        # run the nn forward on data
        prediction_mean = lifted_reg_model(x_data).squeeze(-1)
        # condition on the observed data
        pyro.sample("obs",
                    Normal(prediction_mean, scale),
                    obs=y_data)
        return prediction_mean
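
For reference, the parameter shapes that random_module will replace can be printed directly (a minimal check, assuming the regression_model instance above). Each parameter is resampled from the prior of the same name, so the prior shapes, not these, determine the lifted network's architecture:

for name, param in regression_model.named_parameters():
    print(name, tuple(param.shape))
# hidden.weight  (12, 2)
# hidden.bias    (12,)
# predict.weight (36, 12)
# predict.bias   (36,)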

from pyro.contrib.autoguide import AutoDiagonalNormal
guide = AutoDiagonalNormal(model)
optim = Adam({"lr": 0.01})
svi = SVI(model, guide, optim, loss=Trace_ELBO(), num_samples=50000)


num_iterations = 1000  # example value; not shown in my original snippet

def train():
    pyro.clear_param_store()
    for j in range(num_iterations):
        # calculate the loss and take a gradient step
        loss = svi.step(x_data, y_data)
        if j % 100 == 0:
            print("[iteration %04d] loss: %.4f" % (j + 1, loss / len(x_data)))

train()

Then I get the following error:

RuntimeError                              Traceback (most recent call last)
<ipython-input-565-2da0ffaf5447> in <module>()
----> 1 train()

<ipython-input-561-e3d6a0ef7843> in train()
      3     for j in range(num_iterations):
      4         # calculate the loss and take a gradient step
----> 5         loss = svi.step(x_data, y_data)
      6         if j % 100 == 0:
      7             print("[iteration %04d] loss: %.4f" % (j + 1, loss / len(x_data)))

~/.conda/envs/fastai/lib/python3.6/site-packages/pyro/infer/svi.py in step(self, *args, **kwargs)
     97         # get loss and compute gradients
     98         with poutine.trace(param_only=True) as param_capture:
---> 99             loss = self.loss_and_grads(self.model, self.guide, *args, **kwargs)
    100 
    101         params = set(site["value"].unconstrained()

~/.conda/envs/fastai/lib/python3.6/site-packages/pyro/infer/trace_elbo.py in loss_and_grads(self, model, guide, *args, **kwargs)
    123         loss = 0.0
    124         # grab a trace from the generator
--> 125         for model_trace, guide_trace in self._get_traces(model, guide, *args, **kwargs):
    126             loss_particle, surrogate_loss_particle = self._differentiable_loss_particle(model_trace, guide_trace)
    127             loss += loss_particle / self.num_particles

~/.conda/envs/fastai/lib/python3.6/site-packages/pyro/infer/elbo.py in _get_traces(self, model, guide, *args, **kwargs)
    161         else:
    162             for i in range(self.num_particles):
--> 163                 yield self._get_trace(model, guide, *args, **kwargs)

~/.conda/envs/fastai/lib/python3.6/site-packages/pyro/infer/trace_elbo.py in _get_trace(self, model, guide, *args, **kwargs)
     50         """
     51         model_trace, guide_trace = get_importance_trace(
---> 52             "flat", self.max_plate_nesting, model, guide, *args, **kwargs)
     53         if is_validation_enabled():
     54             check_if_enumerated(guide_trace)

~/.conda/envs/fastai/lib/python3.6/site-packages/pyro/infer/enum.py in get_importance_trace(graph_type, max_plate_nesting, model, guide, *args, **kwargs)
     49     model_trace = prune_subsample_sites(model_trace)
     50 
---> 51     model_trace.compute_log_prob()
     52     guide_trace.compute_score_parts()
     53     if is_validation_enabled():

~/.conda/envs/fastai/lib/python3.6/site-packages/pyro/poutine/trace_struct.py in compute_log_prob(self, site_filter)
    161                 if "log_prob" not in site:
    162                     try:
--> 163                         log_p = site["fn"].log_prob(site["value"], *site["args"], **site["kwargs"])
    164                     except ValueError:
    165                         _, exc_value, traceback = sys.exc_info()

~/.conda/envs/fastai/lib/python3.6/site-packages/torch/distributions/normal.py in log_prob(self, value)
     74         var = (self.scale ** 2)
     75         log_scale = math.log(self.scale) if isinstance(self.scale, Number) else self.scale.log()
---> 76         return -((value - self.loc) ** 2) / (2 * var) - log_scale - math.log(math.sqrt(2 * math.pi))
     77 
     78     def cdf(self, value):

RuntimeError: The size of tensor a (36) must match the size of tensor b (34) at non-singleton dimension 1

prediction_mean has shape (34,), but y_data has shape (34, 36). Note that the layer dimensions you define in Net are overwritten by the shapes of the priors you pass to random_module: your prior on predict.weight has shape (1, second_layer), so the sampled network has a single output unit, and squeeze(-1) then leaves a (34,) prediction that cannot broadcast against the (34, 36) observations.
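
A minimal sketch of a fix along those lines (assuming all 36 outputs should be modeled, and that Normal is pyro.distributions.Normal so .to_event is available): give the predict priors the same shapes as the layer's actual parameters, keep the full prediction, and declare the 36 output columns as an event dimension so the plate only runs over the 34 rows.

# inside model(): priors for the output layer, shaped to match
# predict.weight (36, second_layer) and predict.bias (36,)
mu2 = torch.zeros(36, second_layer).type_as(x_data)
sigma2 = torch.ones(36, second_layer).type_as(x_data)
bias_mu2 = torch.zeros(36).type_as(x_data)
bias_sigma2 = torch.ones(36).type_as(x_data)
w_prior2, b_prior2 = Normal(mu2, sigma2), Normal(bias_mu2, bias_sigma2)

# inside model(): likelihood over the full (34, 36) prediction
with pyro.plate("map", len(x_data)):
    prediction_mean = lifted_reg_model(x_data)  # (34, 36); no squeeze(-1)
    pyro.sample("obs",
                # scalar scale broadcasts; the (34,) scale tensor would not
                Normal(prediction_mean, 1.0).to_event(1),
                obs=y_data)
    return prediction_mean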

Sorry if it is still not clear; thank you in advance!