@fehiepsi @fritzo

Hey there!

This is our implementation so far, we took the Gaussian Mixture Model example as inspiration and have resolved the shape problems we initially had. Inference - or at least the SVI loop - seems to go through, however I do not see that the model has “learned” any parameters so far. The result seems to be a rather random membership vector Z which is also shown in the adjusted rand score we calculate in the end.

The main questions we have are basically

- Have we inferred correctly? (Using infer descrete, etc.)
- Do we successfully sample the “learned” parameters in the end.

Note that we saved our values in lists called “output” for debugging purposes. Maybe this is also useful for you. Here is our code:

```
# Pyro Environment Settings
pyro.enable_validation(True)
pyro.set_rng_seed(1)
pyro.clear_param_store()
def model(data, K):
N = data.shape[0]
# Eta
with pyro.plate("eta_plate_1", K):
with pyro.plate("eta_plate_2", K):
eta_dist = dist.Beta(torch.ones([K, K]), torch.ones([K, K]))
eta = pyro.sample("eta", eta_dist)
# Membership Prior Pi
pi_dist = dist.Dirichlet(concentration=torch.ones([K]))
pi = pyro.sample("pi", pi_dist)
# Community Association Z
with pyro.plate("z_plate", N):
z_dist = dist.Categorical(pi)
z = pyro.sample("z", z_dist, infer={"enumerate": "parallel"})
# Adjacency Matrix A
with pyro.plate("a_plate_1", N):
with pyro.plate("a_plate_2", N):
a_dist = dist.Bernoulli(Vindex(Vindex(eta)[z, :])[:, z])
a = pyro.sample("a", a_dist, obs=data)
def guide(data, K):
N = data.shape[0]
# Eta
eta_1 = pyro.param("eta_1", torch.abs(torch.rand([K, K])), constraint=constraints.positive)
eta_2 = pyro.param("eta_2", torch.abs(torch.randn([K, K])), constraint=constraints.positive)
with pyro.plate("eta_plate_1", K):
with pyro.plate("eta_plate_2", K):
q_eta_dist = dist.Beta(eta_1, eta_2)
q_eta = pyro.sample("eta", q_eta_dist)
# Membership Prior Pi
pi_conc = pyro.param("pi_conc", torch.abs(torch.rand(K)), constraint=constraints.positive)
q_pi_dist = dist.Dirichlet(pi_conc)
q_pi = pyro.sample("pi", q_pi_dist)
# Community Association Z
with pyro.plate("z_plate", N):
q_z_dist = dist.Categorical(q_pi)
q_z = pyro.sample("z", q_z_dist)
# Adjacency Matrix A
with pyro.plate("a_plate_1", N):
with pyro.plate("a_plate_2", N):
q_a_dist = dist.Bernoulli(Vindex(Vindex(q_eta)[q_z, :])[:, q_z])
q_a = pyro.sample("a", q_a_dist)
# Data Set
testSet = 1
if testSet == 1:
A_observed, Z_true = karate("~~data")
A_observed = A_observed.astype(np.float32)
else:
A_observed = np.array([
[0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 0],
], dtype=np.float32)
Z_true = torch.tensor([1., 0., 1., 0., 1., 0., 1., 0., 1., 0.])
# Optimizer
adam_params = {"lr": 0.0005, "betas": (0.95, 0.999)}
adam_optimizer = Adam(adam_params)
# Tracing
trace = poutine.trace(model).get_trace(torch.tensor(A_observed), 2)
trace.compute_log_prob()
print(trace.format_shapes())
# Inference
svi = SVI(model,
guide,
adam_optimizer,
loss=TraceEnum_ELBO(max_plate_nesting=2)
)
# Learning
n_steps = 500
# Saving Values for Debugging`
output1=[]
output2=[]
output3=[]
output4=[]
output5=[]
for step in range(n_steps):
print("Loss: ", svi.step(data=torch.tensor(A_observed), K=2))
pi_curr = pyro.param("pi_conc")
eta_1_curr = pyro.param("eta_1")
eta_2_curr = pyro.param("eta_2")
eta_curr = pyro.sample("eta_curr", dist.Beta(eta_1_curr, eta_2_curr))
N = A_observed.shape[0]
z_curr = pyro.sample("z_curr", dist.Categorical(probs=pi_curr), sample_shape=([N]))
output1.append(pi_curr)
output2.append(eta_curr)
output3.append(z_curr)
output4.append(eta_1_curr)
output5.append(eta_2_curr)
guide_trace = poutine.trace(guide).get_trace(torch.tensor(A_observed), 2) # record the globals
trained_model = poutine.replay(model, trace=guide_trace) # replay the globals
def classifier(data, temperature=0):
inferred_model = infer_discrete(trained_model, temperature=temperature,
first_available_dim=-1) # avoid conflict with data plate
trace = poutine.trace(inferred_model).get_trace(data, 2)
return trace.nodes["z"]["value"]
print("Classifier: ", classifier(torch.tensor(A_observed)))
# Information Extraction
for name, value in pyro.get_param_store().items():
print(name, pyro.param(name))
print("Output 2 (Eta)", output2[-1])
pi_conc = pyro.param("pi_conc")
pi_calc = pyro.sample("pi_calc", dist.Dirichlet(concentration=pi_conc))
z_calc = pyro.sample("z_calc", dist.Categorical(probs=pi_calc), sample_shape=([N]))
print("z_calc", z_calc) # Looks good
print("ARS", adjusted_rand_score(z_calc, Z_true))
# Visualization
Z_pred = z_calc
G = nx.from_numpy_matrix(A_observed)
nx.draw(G, pos=nx.spring_layout(G), node_color=["red" if x==1 else "blue" for x in Z_pred])
plt.show()
nx.draw(G, pos=nx.spring_layout(G), node_color=["red" if x==1 else "blue" for x in Z_true])
plt.show()
```

Any help is greatly appreciated