Getting Error using Poisson + LR: The size of tensor a (88) must match the size of tensor b (7) at non-singleton dimension 1

Hi all,

I am new to Pyro and I am trying to use Bayesian logistic regression where the prior distributions are Poisson (FYI, I got most of the code from the internet :slight_smile: ). My input data size (x_) is [20000, 88] and my output data size (y_) is [20000,]. I was able to run the code when the prior distributions are Normal. Here is the code I am running. I looked at similar issues others have had, but couldn’t apply their solutions to my problem.

import torch
import torch.nn as nn

import pyro
from pyro.distributions import Normal, Categorical, Poisson
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam

class LogisticRegression(torch.nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LogisticRegression, self).__init__()
        self.fc1 = torch.nn.Linear(input_dim, output_dim)

    def forward(self, x):
        outputs = self.fc1(x)
        return outputs

input_size = len(df.columns) - 1
net = LogisticRegression(input_size,7)
net = net.float()

log_softmax = nn.LogSoftmax(dim=1)

def model(x_data, y_data):
    
    fc1w_prior = Poisson(rate=torch.ones_like(net.fc1.weight))
    fc1b_prior = Poisson(rate=torch.ones_like(net.fc1.bias))
    
    priors = {'fc1.weight': fc1w_prior, 'fc1.bias': fc1b_prior}
    # lift module parameters to random variables sampled from the priors
    lifted_module = pyro.random_module("module", net, priors)
    # sample a regressor (which also samples w and b)
    lifted_reg_model = lifted_module()
    
    lhat = log_softmax(lifted_reg_model(x_data))
    
    pyro.sample("obs", Categorical(logits=lhat), obs=y_data)

def guide(x_data, y_data):
    
    # First layer weight distribution priors
    fc1w_rate = torch.randn_like(net.fc1.weight)
    fc1w_rate_param = softplus(pyro.param("fc1w_rate", fc1w_rate))
    fc1w_prior = Poisson(rate=fc1w_rate_param)
    # First layer bias distribution priors
    fc1b_rate = torch.randn_like(net.fc1.bias)
    fc1b_rate_param = softplus(pyro.param("fc1b_rate", fc1b_rate))
    fc1b_prior = Poisson(rate=fc1b_rate_param).independent(1)
    priors = {'fc1.weight': fc1w_prior, 'fc1.bias': fc1b_prior}
    
    lifted_module = pyro.random_module("module", net, priors)
    
    return lifted_module()

optim = Adam({"lr": 0.001})
svi = SVI(model, guide, optim, loss=Trace_ELBO())

target_col = 'dependent_feature'
num_iterations = 250
loss = 0
batch_size = 10000
batch_list = [i for i in range(0,len(df)+1,batch_size)]

for j in range(num_iterations):
    loss = 0
    for i in range(len(batch_list)-1):
        data = df.iloc[batch_list[i]:batch_list[i+1]]
        x_ = torch.from_numpy(data.drop([target_col], axis=1).values).float()
        y_ = torch.from_numpy(data[target_col].values).float()
        # calculate the loss and take a gradient step
        loss += svi.step(x_, y_)
    normalizer_train = len(df)  # normalize the epoch loss by the number of training examples
    total_epoch_loss_train = loss / normalizer_train
    
    print("Epoch ", j, " Loss ", total_epoch_loss_train)

I am getting the following error:

The size of tensor a (88) must match the size of tensor b (7) at non-singleton dimension 1

And here is the complete traceback:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<command-726574161117256> in <module>()
     12         y_ = torch.from_numpy(data[target_col].values).float()
     13         # calculate the loss and take a gradient step
---> 14         loss += svi.step(x_, y_)
     15     normalizer_train = len(temp1_p)
     16     total_epoch_loss_train = loss / normalizer_train

/databricks/python/lib/python3.5/site-packages/pyro/infer/svi.py in step(self, *args, **kwargs)
     96         # get loss and compute gradients
     97         with poutine.trace(param_only=True) as param_capture:
---> 98             loss = self.loss_and_grads(self.model, self.guide, *args, **kwargs)
     99 
    100         params = set(site["value"].unconstrained()

/databricks/python/lib/python3.5/site-packages/pyro/infer/trace_elbo.py in loss_and_grads(self, model, guide, *args, **kwargs)
    122         # grab a trace from the generator
    123         for model_trace, guide_trace in self._get_traces(model, guide, *args, **kwargs):
--> 124             loss_particle, surrogate_loss_particle = self._differentiable_loss_particle(model_trace, guide_trace)
    125             loss += loss_particle / self.num_particles
    126 

/databricks/python/lib/python3.5/site-packages/pyro/infer/trace_elbo.py in _differentiable_loss_particle(self, model_trace, guide_trace)
     91                 if not is_identically_zero(score_function_term):
     92                     if log_r is None:
---> 93                         log_r = _compute_log_r(model_trace, guide_trace)
     94                     site = log_r.sum_to(site["cond_indep_stack"])
     95                     surrogate_elbo_particle = surrogate_elbo_particle + (site * score_function_term).sum()

/databricks/python/lib/python3.5/site-packages/pyro/infer/trace_elbo.py in _compute_log_r(model_trace, guide_trace)
     18             if not model_site["is_observed"]:
     19                 log_r_term = log_r_term - guide_trace.nodes[name]["log_prob"]
---> 20             log_r.add((stacks[name], log_r_term.detach()))
     21     return log_r
     22 

/databricks/python/lib/python3.5/site-packages/pyro/infer/util.py in add(self, *items)
    110             assert all(f.dim < 0 and -value.dim() <= f.dim for f in frames)
    111             if frames in self:
--> 112                 self[frames] = self[frames] + value
    113             else:
    114                 self[frames] = value

RuntimeError: The size of tensor a (88) must match the size of tensor b (7) at non-singleton dimension 1

Note: I also tried removing the .independent(1) call, but I got the same error.

@neerajprad Can you please help me with this issue? Thanks!

Hi, you need to add .to_event calls to your weight/bias priors so that the distributions have the same event_shape as the various nn.Module parameters:

def model(...):
    ...
    fc1w_prior = Poisson(rate=torch.ones_like(net.fc1.weight)).to_event(2)
    fc1b_prior = Poisson(rate=torch.ones_like(net.fc1.bias)).to_event(1)
...
def guide(...):
    ...
    fc1w_prior = Poisson(rate=fc1w_rate_param).to_event(2)
    fc1b_prior = Poisson(rate=fc1b_rate_param).to_event(1)
    ...
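
As a quick sanity check on why this matters (a minimal sketch, assuming the 7×88 fc1.weight shape from the code above): without .to_event, every element of the rate tensor is treated as a separate batch dimension, so log_prob returns a [7, 88] tensor for the weight site and a [7] tensor for the bias site, and the ELBO machinery then tries to broadcast those against each other, which is exactly the 88-vs-7 mismatch in your error. With .to_event, each parameter tensor is a single event and log_prob returns a scalar:

import torch
from pyro.distributions import Poisson

rates = torch.ones(7, 88)  # same shape as net.fc1.weight

d = Poisson(rates)
print(d.batch_shape, d.event_shape)          # torch.Size([7, 88]) torch.Size([])
print(d.log_prob(torch.zeros(7, 88)).shape)  # torch.Size([7, 88]), one log-prob per element

d2 = Poisson(rates).to_event(2)
print(d2.batch_shape, d2.event_shape)        # torch.Size([]) torch.Size([7, 88])
print(d2.log_prob(torch.zeros(7, 88)).shape) # torch.Size([]), a single joint log-prob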

@eb8680_2 Thanks a lot for the response! That solved the issue, but I got a new error: name 'softplus' is not defined. I removed the softplus call and the code ran, but the loss values are all NaN, I assume because the rate parameter must be positive. How can I add a positivity constraint?
I also tried pyro.param("fc1w_rate", fc1w_rate, constraint=constraints.positive) but got the following error:
name 'constraints' is not defined.

You’ll need to import softplus and constraints from PyTorch:

from torch.nn.functional import softplus
from torch.distributions import constraints
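
With those imports, either approach should work. For reference, a minimal sketch of the guide using the constraint-based version (note the torch.ones_like initialization: a positive-constrained parameter initialized from torch.randn_like can start at negative values, which produces NaNs right away):

import torch
import pyro
from pyro.distributions import Poisson
from torch.distributions import constraints

def guide(x_data, y_data):
    # positive-constrained rate parameters; pyro.param keeps them positive
    # throughout optimization, so no softplus is needed
    fc1w_rate = pyro.param("fc1w_rate", torch.ones_like(net.fc1.weight),
                           constraint=constraints.positive)
    fc1b_rate = pyro.param("fc1b_rate", torch.ones_like(net.fc1.bias),
                           constraint=constraints.positive)
    fc1w_prior = Poisson(rate=fc1w_rate).to_event(2)
    fc1b_prior = Poisson(rate=fc1b_rate).to_event(1)
    priors = {'fc1.weight': fc1w_prior, 'fc1.bias': fc1b_prior}
    lifted_module = pyro.random_module("module", net, priors)
    return lifted_module()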

Thanks a lot! That solved the issue.