HMM tutorial: model with DiscreteHMM converges to wrong "AutoDelta.probs_x" values

I'm working through the HMM tutorial (Example: Hidden Markov Models — Pyro Tutorials 1.8.4 documentation).

Models 5 and 7 are supposed to be identical, but I get different values for the "AutoDelta.probs_x" parameter. For model_7 the learned transition matrix is essentially the identity: 0.967 on the diagonal and a uniform 0.0022 everywhere else, which seems strange. Is that expected, or is something going wrong?
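
To pin down what I mean by "identical", here is a stripped-down sketch (NOT the tutorial code) of the structure I understand the two models to share: the same Dirichlet prior on probs_x, fit with an AutoDelta guide over the probs_ sites (which is where the "AutoDelta.probs_x" parameter comes from), written once as an explicit enumerated pyro.markov loop and once through dist.DiscreteHMM. A single sequence, plain Bernoulli emissions, no length masking, and small arbitrary dimensions, so it is only meant to show the structure, not reproduce the tutorial:

# hmm_compare_sketch.py -- my simplified sketch, not the tutorial code.
import torch

import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer import SVI, TraceEnum_ELBO
from pyro.infer.autoguide import AutoDelta
from pyro.optim import Adam

hidden_dim, data_dim, num_steps = 4, 8, 50  # arbitrary toy sizes
pyro.set_rng_seed(0)
sequence = torch.bernoulli(0.3 * torch.ones(num_steps, data_dim))  # fake data


def model_loop(sequence):
    # Explicit time loop; the discrete x[t] is enumerated out by TraceEnum_ELBO.
    probs_x = pyro.sample(
        "probs_x", dist.Dirichlet(0.9 * torch.eye(hidden_dim) + 0.1).to_event(1)
    )
    probs_y = pyro.sample(
        "probs_y", dist.Beta(0.1, 0.9).expand([hidden_dim, data_dim]).to_event(2)
    )
    tones_plate = pyro.plate("tones", data_dim, dim=-1)
    x = 0
    for t in pyro.markov(range(len(sequence))):
        x = pyro.sample(
            "x_{}".format(t),
            dist.Categorical(probs_x[x]),
            infer={"enumerate": "parallel"},
        )
        with tones_plate:
            pyro.sample(
                "y_{}".format(t),
                dist.Bernoulli(probs_y[x.squeeze(-1)]),
                obs=sequence[t],
            )


def model_hmm(sequence):
    # Same joint distribution, but the time loop and the marginalization over
    # x are folded into dist.DiscreteHMM.
    probs_x = pyro.sample(
        "probs_x", dist.Dirichlet(0.9 * torch.eye(hidden_dim) + 0.1).to_event(1)
    )
    probs_y = pyro.sample(
        "probs_y", dist.Beta(0.1, 0.9).expand([hidden_dim, data_dim]).to_event(2)
    )
    init_logits = torch.full((hidden_dim,), -float("inf"))
    init_logits[0] = 0.0  # deterministic start in state 0, like `x = 0` above
    trans_logits = probs_x.log()
    # Observation dist has batch_shape (hidden_dim,) and event_shape (data_dim,).
    obs_dist = dist.Bernoulli(probs_y).to_event(1)
    pyro.sample("y", dist.DiscreteHMM(init_logits, trans_logits, obs_dist), obs=sequence)


def fit(model, steps=200):
    pyro.clear_param_store()
    guide = AutoDelta(poutine.block(model, expose=["probs_x", "probs_y"]))
    svi = SVI(model, guide, Adam({"lr": 0.05}), TraceEnum_ELBO(max_plate_nesting=1))
    for _ in range(steps):
        svi.step(sequence)
    return pyro.get_param_store()["AutoDelta.probs_x"].detach().clone()


probs_x_loop = fit(model_loop)
probs_x_hmm = fit(model_hmm)
# Hidden states are only identified up to relabeling, so compare a
# permutation-invariant summary rather than the raw matrices.
print("mean self-transition, loop version:", probs_x_loop.diagonal().mean().item())
print("mean self-transition, DiscreteHMM version:", probs_x_hmm.diagonal().mean().item())

On a toy setup like this I would expect the two numbers to come out close, since up to a relabeling of the hidden states the models define the same joint distribution. That is why the near-identity probs_x from model 7 below surprises me. Here is the output from model 7: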

$ python -i hmm.py -m 7
>>> pyro.get_param_store()["AutoDelta.probs_x"]
tensor([[0.9670, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.9670, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.9670, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.9670, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.9670, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.9670, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.9670, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.9670, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.9670,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.9670, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.9670, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.9670, 0.0022, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.9670, 0.0022, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.9670, 0.0022, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.9670, 0.0022],
        [0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022,
         0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.0022, 0.9670]],
       grad_fn=<DivBackward0>)

Here is the output from model 5:

$ python -i hmm.py -m 5
>>> pyro.get_param_store()["AutoDelta.probs_x"]
tensor([[0.0572, 0.2137, 0.0789, 0.0425, 0.0422, 0.1045, 0.0193, 0.0279, 0.0427,
         0.0238, 0.0175, 0.0769, 0.1616, 0.0253, 0.0453, 0.0207],
        [0.0518, 0.2075, 0.0540, 0.0329, 0.0482, 0.0868, 0.0360, 0.0380, 0.0650,
         0.0522, 0.0519, 0.0582, 0.0895, 0.0312, 0.0732, 0.0237],
        [0.0507, 0.0468, 0.1489, 0.0441, 0.0469, 0.0866, 0.0430, 0.0485, 0.0654,
         0.0532, 0.0608, 0.0675, 0.0913, 0.0411, 0.0780, 0.0273],
        [0.0542, 0.0378, 0.0694, 0.1445, 0.0495, 0.0729, 0.0414, 0.0506, 0.0606,
         0.0590, 0.0542, 0.0658, 0.0861, 0.0396, 0.0878, 0.0267],
        [0.0788, 0.0507, 0.0549, 0.0375, 0.1150, 0.0771, 0.0389, 0.0399, 0.0671,
         0.0856, 0.0533, 0.0535, 0.0834, 0.0333, 0.1024, 0.0286],
        [0.0524, 0.0529, 0.0540, 0.0372, 0.0459, 0.1588, 0.0455, 0.0520, 0.0697,
         0.0610, 0.0794, 0.0624, 0.0897, 0.0346, 0.0752, 0.0293],
        [0.0490, 0.0401, 0.0631, 0.0383, 0.0442, 0.0899, 0.1646, 0.0483, 0.0610,
         0.0539, 0.0564, 0.0664, 0.0850, 0.0326, 0.0830, 0.0242],
        [0.0505, 0.0336, 0.0560, 0.0398, 0.0422, 0.0894, 0.0429, 0.1694, 0.0524,
         0.0636, 0.0741, 0.0667, 0.0735, 0.0329, 0.0874, 0.0256],
        [0.0532, 0.0465, 0.0563, 0.0371, 0.0484, 0.0879, 0.0422, 0.0439, 0.1789,
         0.0581, 0.0652, 0.0590, 0.0859, 0.0341, 0.0760, 0.0273],
        [0.0710, 0.0340, 0.0464, 0.0343, 0.0563, 0.0780, 0.0404, 0.0515, 0.0534,
         0.1799, 0.0741, 0.0549, 0.0673, 0.0280, 0.1041, 0.0264],
        [0.0465, 0.0308, 0.0424, 0.0284, 0.0389, 0.0746, 0.0333, 0.0422, 0.0508,
         0.0576, 0.3159, 0.0516, 0.0695, 0.0256, 0.0711, 0.0209],
        [0.0525, 0.0459, 0.0621, 0.0391, 0.0456, 0.0883, 0.0449, 0.0548, 0.0607,
         0.0595, 0.0749, 0.1358, 0.0910, 0.0379, 0.0805, 0.0265],
        [0.0514, 0.0647, 0.0632, 0.0389, 0.0488, 0.0933, 0.0425, 0.0436, 0.0724,
         0.0509, 0.0673, 0.0678, 0.1577, 0.0418, 0.0677, 0.0280],
        [0.0495, 0.0393, 0.0604, 0.0373, 0.0459, 0.0788, 0.0378, 0.0428, 0.0568,
         0.0519, 0.0491, 0.0619, 0.0841, 0.2027, 0.0780, 0.0237],
        [0.0723, 0.0342, 0.0492, 0.0411, 0.0589, 0.0706, 0.0485, 0.0600, 0.0509,
         0.1030, 0.0733, 0.0536, 0.0629, 0.0330, 0.1595, 0.0289],
        [0.0510, 0.0376, 0.0552, 0.0349, 0.0456, 0.0846, 0.0359, 0.0424, 0.0566,
         0.0561, 0.0547, 0.0590, 0.0761, 0.0300, 0.0792, 0.2011]],
       grad_fn=<DivBackward0>)
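
For reference, here is the small snippet I use in the same python -i session (for either -m 5 or -m 7) to summarize the learned matrix instead of eyeballing it:

import torch
import pyro

probs_x = pyro.get_param_store()["AutoDelta.probs_x"].detach()
n = probs_x.size(0)
diag = probs_x.diagonal()
print("rows sum to 1:", torch.allclose(probs_x.sum(-1), torch.ones(n)))
print("mean self-transition prob:", diag.mean().item())
print("mean off-diagonal prob:", ((probs_x.sum(-1) - diag) / (n - 1)).mean().item())

For model 7 this prints a mean self-transition probability of about 0.967 and about 0.002 for everything else, so the chain essentially never leaves its current state, while for model 5 the mass in each row is much more spread out. That discrepancy is what I'm asking about.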