Help: shape error with TraceMeanField_ELBO and vectorize_particles=True

When I set elbo = TraceMeanField_ELBO(num_particles=10, vectorize_particles=True), I get the error below. If I set vectorize_particles to False, the error goes away. How can I solve this problem?

Traceback (most recent call last):
  File "/home/tranmap/miniconda3/envs/lq/lib/python3.6/site-packages/pyro/poutine/trace_messenger.py", line 174, in __call__
    ret = self.fn(*args, **kwargs)
  File "/home/tranmap/miniconda3/envs/lq/lib/python3.6/site-packages/pyro/poutine/messenger.py", line 12, in _context_wrap
    return fn(*args, **kwargs)
  File "/home/tranmap/miniconda3/envs/lq/lib/python3.6/site-packages/pyro/poutine/messenger.py", line 12, in _context_wrap
    return fn(*args, **kwargs)
  File "/home/tranmap/miniconda3/envs/lq/lib/python3.6/site-packages/pyro/nn/module.py", line 426, in __call__
    return super().__call__(*args, **kwargs)
  File "/home/tranmap/miniconda3/envs/lq/lib/python3.6/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
    return forward_call(*input, **kwargs)
  File "/home/tranmap/liquan/万寿西宫/模型/火焰兵/第二种.py", line 139, in forward
    self.lstm.flatten_parameters()
  File "/home/tranmap/miniconda3/envs/lq/lib/python3.6/site-packages/torch/nn/modules/rnn.py", line 179, in flatten_parameters
    self.batch_first, bool(self.bidirectional))
RuntimeError: shape '[2400, 1]' is invalid for input of size 24000
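
For context, here is roughly how I set up training with the model defined below (a minimal sketch; the AutoNormal guide, the Adam settings, and input_size are representative placeholders, not my exact configuration):

import pyro
from pyro.infer import SVI, TraceMeanField_ELBO
from pyro.infer.autoguide import AutoNormal
from pyro.optim import Adam

# Sketch of my SVI setup; guide choice, learning rate, and input_size are placeholders
input_size = 8  # placeholder; my real feature count differs
model = BayesianLSTM(input_size=input_size).to(device)
guide = AutoNormal(model)

elbo = TraceMeanField_ELBO(num_particles=10, vectorize_particles=True)    # triggers the error above
# elbo = TraceMeanField_ELBO(num_particles=10, vectorize_particles=False)  # runs without error
svi = SVI(model, guide, Adam({"lr": 1e-3}), loss=elbo)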

I notice that 24000 = 10 × 2400, which matches num_particles=10, so it looks like the vectorized particle dimension is being folded into the LSTM's flattened weight size. Here's my model:
import torch
import torch.nn as nn
import pyro
import pyro.distributions as dist
from pyro.nn import PyroModule, PyroSample

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # set earlier in my script

class BayesianLSTM(PyroModule):
    def __init__(self, input_size, hidden_size=50, num_layers=2, dropout_prob=0.2):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Use PyTorch's built-in LSTM
        self.lstm = PyroModule[nn.LSTM](
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=dropout_prob,
            batch_first=True
        )

        # Collect all (name, parameter) pairs
        param_names = list(self.lstm.named_parameters())

        # Now set a prior distribution on each parameter
        for name, param in param_names:
            if 'weight_ih' in name:
                setattr(self.lstm, name, PyroSample(
                    dist.Normal(0, torch.tensor(0.15, device=device)).expand(param.shape).to_event(param.dim())
                ))
            elif 'weight_hh' in name:
                setattr(self.lstm, name, PyroSample(
                    dist.Normal(0, torch.tensor(0.1, device=device)).expand(param.shape).to_event(param.dim())
                ))
            elif 'bias' in name:
                setattr(self.lstm, name, PyroSample(
                    dist.Normal(0, torch.tensor(0.05, device=device)).expand(param.shape).to_event(param.dim())
                ))
        # Dropout layer (applied after the LSTM output)
        self.dropout = nn.Dropout(p=dropout_prob)
        # Changed to a plain nn.Linear layer
        self.linear = nn.Linear(hidden_size, 1)
        # self.linear = PyroModule[nn.Linear](hidden_size, 1)
        # Bayesian parameters for the linear layer:
        # self.linear.weight = PyroSample(
        #     dist.Normal(0, torch.tensor(0.1, device=device)).expand(self.linear.weight.shape).to_event(
        #         self.linear.weight.dim())
        # )
        # self.linear.bias = PyroSample(
        #     dist.Normal(0, torch.tensor(0.05, device=device)).expand(self.linear.bias.shape).to_event(
        #         self.linear.bias.dim())
        # )

    def forward(self, x, y=None, annealing_factor=1.0):
        # Ensure the parameters are contiguous in memory
        self.lstm.flatten_parameters()

        # Initialize hidden states
        h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=device)
        c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size, device=device)

        lstm_out, _ = self.lstm(x, (h0, c0))
        dropout_out = self.dropout(lstm_out[:, -1, :])

        mu = self.linear(dropout_out).squeeze(-1)
        pyro.deterministic("mu", mu)
        if y is not None:
            # log-mean and log-std of the noise prior
            sigma = pyro.sample("sigma", dist.LogNormal(loc=torch.tensor(-1.0, device=device), scale=torch.tensor(0.5, device=device)))
            with pyro.plate("data", x.size(0)):
                pyro.sample("obs", dist.Normal(mu, sigma), obs=y.squeeze(-1), infer={"scale": annealing_factor})  # obs: real data + noise
        return mu
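
And this is how one training step invokes the model (another sketch; batch_size and seq_len below are placeholder values standing in for my actual ones):

# One training step; batch_size and seq_len are placeholder values
batch_size, seq_len = 64, 30
x = torch.randn(batch_size, seq_len, input_size, device=device)  # (batch, seq, features) since batch_first=True
y = torch.randn(batch_size, 1, device=device)                    # targets; squeezed to (batch,) inside the model
loss = svi.step(x, y)                                            # this call hits the shape error above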

Also, my loss is in the tens of thousands. What could be causing that?
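
For what it's worth, I log the raw value returned by svi.step, which (as I understand it) is the negative ELBO summed over the whole batch plus the KL terms for every LSTM weight, so I also tried normalizing it per sample to judge the scale (sketch):

raw_loss = svi.step(x, y)          # summed negative-ELBO estimate
per_sample = raw_loss / x.size(0)  # per-sample view for comparison
print(f"loss: {raw_loss:.1f}  per-sample: {per_sample:.4f}")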