How to store parameter values and gradient norms during training

I want to plot the evolution of the parameters during training. To monitor the gradient norms I'm using the following register_hook code, which I found in the documentation here:

from collections import defaultdict

# Register hooks to monitor gradient norms.
gradient_norms = defaultdict(list)
for name, value in pyro.get_param_store().named_parameters():
    value.register_hook(
        lambda g, name=name: gradient_norms[name].append(g.norm().item())
    )

I then decided to add the following lines to record the evolution of the parameter values (as shown in this example):

losses, params = [], []
for i in range(200 if not smoke_test else 2):
    loss = svi.step(data)
    losses.append(loss)
    p = [pyro.param(name) for name in ['weights', 'locs', 'scales']]
    params.append(p)
    print('.' if i % 100 else '\n', end='')

This raises the following:

---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
/var/folders/n_/crg11f15515bmttss59gmbn40000gn/T/ipykernel_67461/2530619780.py in <module>
     10     loss = svi.step(data)
     11     losses.append(loss)
---> 12     p = [pyro.param(name) for name in ['weights', 'locs', 'scales']]
     13     params.append(p)
     14     print('.' if i % 100 else '\n', end='')

/var/folders/n_/crg11f15515bmttss59gmbn40000gn/T/ipykernel_67461/2530619780.py in <listcomp>(.0)
     10     loss = svi.step(data)
     11     losses.append(loss)
---> 12     p = [pyro.param(name) for name in ['weights', 'locs', 'scales']]
     13     params.append(p)
     14     print('.' if i % 100 else '\n', end='')

~/anaconda3/envs/torch/lib/python3.9/site-packages/pyro/primitives.py in param(name, init_tensor, constraint, event_dim)
     76     # Note effectful(-) requires the double passing of name below.
     77     args = (name,) if init_tensor is None else (name, init_tensor)
---> 78     return _param(*args, constraint=constraint, event_dim=event_dim, name=name)
     79 
     80 

~/anaconda3/envs/torch/lib/python3.9/site-packages/pyro/poutine/runtime.py in _fn(*args, **kwargs)
    261 
    262         if not am_i_wrapped():
--> 263             return fn(*args, **kwargs)
    264         else:
    265             msg = {

~/anaconda3/envs/torch/lib/python3.9/site-packages/pyro/params/param_store.py in get_param(self, name, init_tensor, constraint, event_dim)
    212         """
    213         if init_tensor is None:
--> 214             return self[name]
    215         else:
    216             return self.setdefault(name, init_tensor, constraint)

~/anaconda3/envs/torch/lib/python3.9/site-packages/pyro/params/param_store.py in __getitem__(self, name)
    103         Get the *constrained* value of a named parameter.
    104         """
--> 105         unconstrained_value = self._params[name]
    106 
    107         # compute the constrained value

KeyError: 'weights'

How can I store parameter values and gradient norms during training?

Oh, I see the problem: 'weights', 'locs', and 'scales' are not parameters in the param store; they are latent variables that get sampled from distributions in the model, which is why pyro.param raises a KeyError for them.
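
For anyone who hits the same thing, here is a minimal sketch of what should work instead, assuming the setup from the GMM tutorial (model, data, svi, and an autoguide already defined). The param store only contains the names that the guide registers (for an AutoDelta guide these look like 'AutoDelta.weights' rather than the sample-site names), so the values are read from pyro.get_param_store() and snapshotted each step:

from collections import defaultdict

import pyro

# Take one step first so the guide registers its parameters in the param store.
losses = [svi.step(data)]

# Inspect the names that are actually present
# (for an autoguide they are prefixed, e.g. 'AutoDelta.weights').
print(list(pyro.get_param_store().keys()))

# Register hooks on the unconstrained parameters to record gradient norms.
gradient_norms = defaultdict(list)
for name, value in pyro.get_param_store().named_parameters():
    value.register_hook(
        lambda g, name=name: gradient_norms[name].append(g.norm().item())
    )

# Record the constrained values every step; detach().clone() keeps a plain
# snapshot instead of a tensor tied to the autograd graph.
param_history = defaultdict(list)
for i in range(200):
    losses.append(svi.step(data))
    for name, value in pyro.get_param_store().items():
        param_history[name].append(value.detach().clone())

gradient_norms and param_history can then be plotted against the iteration index (e.g. by stacking param_history['AutoDelta.locs'] with torch.stack).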