Dynamically change nn.Module layers

I was wondering if there is a way to dynamically change and update the dimensions of an nn.Linear layer. For example, in PyTorch I can do:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class MyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Weight for a linear map from 2 input features to 10 outputs
        self.weight = nn.Parameter(torch.randn(10, 2))
        
    def forward(self, x):
        x = F.linear(x, self.weight)
        return x

model = MyModel()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-1)
x = torch.randn(1, 2)

output = model(x)
output.mean().backward()
print(model.weight.grad)

criterion = nn.MSELoss()
x = torch.randn(1, 2)
target = torch.randn(1, 10)

# Train for a few epochs
for epoch in range(10):
    optimizer.zero_grad()
    output = model(x)
    loss = criterion(output, target)
    loss.backward()
    optimizer.step()
    print('Epoch {}, loss {}'.format(epoch, loss.item()))

# Add another input feature by concatenating a randomly initialized
# column to the weight. Note this creates a brand-new Parameter, so the
# old optimizer now holds a stale reference to the replaced tensor.
with torch.no_grad():
    model.weight = nn.Parameter(torch.cat((model.weight, torch.randn(10, 1)), 1))


x = torch.randn(1, 3)
output = model(x)
output.mean().backward()
print(model.weight.grad)
model.zero_grad()

# Re-create the optimizer so it tracks the new Parameter
# (note this resets Adam's running moment estimates)
optimizer = optim.Adam(model.parameters(), lr=1e-1)

# Continue training
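# A minimal sketch of the continuation, assuming the target stays
# 10-dimensional and only the input grows to 3 features:
x = torch.randn(1, 3)
target = torch.randn(1, 10)

for epoch in range(10):
    optimizer.zero_grad()
    output = model(x)
    loss = criterion(output, target)
    loss.backward()
    optimizer.step()
    print('Epoch {}, loss {}'.format(epoch, loss.item()))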

I can probably do the same thing in Pyro; however, how would I incorporate this into model() and guide() if I want to use SVI?
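To make the question concrete, here is a rough sketch of what I'm imagining, assuming a Normal prior over the weight and an AutoNormal guide (make_model and fit are hypothetical helpers I made up, not Pyro API):

import torch
import torch.nn.functional as F
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.infer.autoguide import AutoNormal
from pyro.optim import Adam

def make_model(in_features, out_features=10):
    def model(x, y=None):
        # Prior over a weight matrix shaped to the current input size
        weight = pyro.sample(
            "weight",
            dist.Normal(torch.zeros(out_features, in_features), 1.0).to_event(2),
        )
        mean = F.linear(x, weight)
        with pyro.plate("data", x.shape[0]):
            pyro.sample("obs", dist.Normal(mean, 1.0).to_event(1), obs=y)
        return mean
    return model

def fit(in_features, x, y, steps=10):
    model = make_model(in_features)
    guide = AutoNormal(model)  # guide params are created lazily at the right shape
    svi = SVI(model, guide, Adam({"lr": 1e-1}), loss=Trace_ELBO())
    for step in range(steps):
        loss = svi.step(x, y)
        print('Step {}, ELBO loss {}'.format(step, loss))
    return guide

# Train with 2 input features
x = torch.randn(5, 2)
y = torch.randn(5, 10)
fit(2, x, y)

# Grow to 3 input features: the old variational parameters have the wrong
# shape, so clear the param store and rebuild the guide before re-training
pyro.clear_param_store()
x = torch.randn(5, 3)
fit(3, x, y)

The part I'm unsure about is that clearing the param store re-fits the guide from scratch; carrying the learned posterior over to the first two columns of the new weight would seemingly require manually copying slices out of the old variational parameters.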