Implementation of Bayesian Thurstonian model

Hi,

I have been working on implementing a cognitive response model that is written in JAGS. I have been struggling to mimic the “dinterval” function from JAGS. I also recognize that the for-loops could be made more efficient using vectorization in NumPyro.

I attempted to implement it; however, the model is not behaving as expected.

I would appreciate your help with this implementation.

JAGS code:

# Define the JAGS model as a string
# (Thurstonian ranking model: latent item utilities + per-person noise,
#  with observed rankings tied to the latents via dinterval.)
modelString <- "
model {
  # Latent truth
  # Item locations; centred below so the overall location is identified.
  for (j in 1:nItems) {
    muTmp[j] ~ dunif(-100, 100)
  }
  muMean <- mean(muTmp[1:nItems])
  for (j in 1:nItems) {
    mu[j] <- muTmp[j] - muMean
  }
  
  # Expertise
  # Per-person noise scales; dividing by the mean fixes the overall scale.
  for (i in 1:nPeople) {
    sigmaTmp[i] ~ dunif(0, 1)
  }
  sigmaMean <- mean(sigmaTmp[1:nPeople])
  for (i in 1:nPeople) {
    sigma[i] <- sigmaTmp[i] / sigmaMean
  }
  
  # Data
  # Latent utility of each item as perceived by the judge of ranking i.
  for (i in 1:nRankings) {
    for (j in 1:nItems) {
      x[i, j] ~ dnorm(mu[j], pow(sigma[person[i]], -2))
    }
    
    # Define rankings once per ranking 'i'
    # Sorted latent utilities of the items in this ranking's set; these
    # serve as the cutpoint vector for dinterval below.
    rankings[i, 1:setN[i]] <- sort(x[i, set[i, 1:setN[i]]])
    
    for (k in 1:setN[i]) {
      # dinterval(t, c) observes which interval of the cutpoint vector c
      # contains t; with the sorted x's as cutpoints, observing y (the
      # rank) constrains each latent x to agree with the observed order.
      y[i, set[i, k]] ~ dinterval(x[i, set[i, k]], rankings[i, 1:setN[i]])
    }
  }
}
"
def model(nPeople, nItems, nRankings, person, set_array, setN, y):
    """NumPyro port of the JAGS Thurstonian ranking model.

    The JAGS ``dinterval`` construction (latent utilities truncated to
    respect the observed ordering) is replaced by an explicit
    Plackett-Luce log-likelihood over the latent utilities.

    Args:
        nPeople: number of judges.
        nItems: number of items.
        nRankings: number of observed rankings.
        person: int array (nRankings,), 0-based judge index per ranking.
        set_array: int array (nRankings, max_set_size), 0-based item
            indices per ranking, padded with -1. Must be a concrete
            (host-side NumPy) array: it is boolean-masked at trace time,
            which a traced JAX array cannot support with a data-dependent
            result shape.
        setN: int array (nRankings,), items per ranking. Redundant with
            the -1 padding in ``set_array``, which is authoritative here.
        y: int array (nRankings, nItems), observed rank of each item
            within its ranking.
    """
    from jax.scipy.special import logsumexp

    # Latent truth: item means, centred so the location is identified
    # (mirrors mu[j] <- muTmp[j] - muMean in the JAGS model).
    muTmp = numpyro.sample('muTmp', dist.Uniform(-100, 100).expand([nItems]))
    mu = muTmp - jnp.mean(muTmp)
    numpyro.deterministic('mu', mu)

    # Expertise: per-person noise scale, normalised by its mean so the
    # overall scale is identified (sigma[i] <- sigmaTmp[i] / sigmaMean).
    sigmaTmp = numpyro.sample('sigmaTmp', dist.Uniform(0, 1).expand([nPeople]))
    sigma = sigmaTmp / jnp.mean(sigmaTmp)
    numpyro.deterministic('sigma', sigma)

    # Data likelihood
    for i in range(nRankings):
        p = person[i]  # 0-based judge index for this ranking

        # Drop the -1 padding to get the items actually ranked.
        s_i = set_array[i]
        s_i = s_i[s_i >= 0]
        n_i = len(s_i)

        # Latent utilities for the items in this ranking.
        # NOTE(review): sampling x and then adding a Plackett-Luce factor
        # on x is a soft analogue of dinterval, not the same model as the
        # JAGS truncation — confirm this is the intended approximation.
        x_i_s = numpyro.sample(
            f'x_{i}', dist.Normal(mu[s_i], sigma[p]).to_event(1)
        )

        # Reorder utilities by observed rank: sorted_x[k] is the utility
        # of the item ranked k-th in ranking i.
        ordering = jnp.argsort(y[i, s_i])
        sorted_x = x_i_s[ordering]

        # Plackett-Luce log-likelihood, computed entirely in log-space:
        #   log P = sum_k [ x_(k) - logsumexp(x_(k), ..., x_(n)) ].
        # The original exp()-then-log formulation overflows: mu can reach
        # +/-100 and exp(100) is inf in float32, yielding nan gradients.
        log_likelihood = 0.0
        for k in range(n_i):
            log_likelihood += sorted_x[k] - logsumexp(sorted_x[k:])

        numpyro.factor(f'pl_likelihood_{i}', log_likelihood)