Abductive Reasoning and Probabilistic Programming

Hi!

I was wondering where the best place is to get started learning about how Abductive Reasoning has materialized in the Probabilistic Programming and Bayesian Inference worlds: are there research papers or books that aren't just about Bayesian inference and inductive reasoning? Should I read mainly about belief revision? I want to look at the math itself… I wonder how the Abduction, Deduction, and Induction cycle is being expressed and leveraged.

I was thinking of something like the following mapping (with a rough sketch of the abduction step right after the list):

  • Abduction: Generative models (e.g., GANs, LSTMs)
  • Deduction: Rule-based models (e.g., Decision Trees)
  • Induction: Statistical models (e.g., Random Forest)
  • Iterative Refinement: Reinforcement Learning models
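
To make the abduction bullet concrete, the idiom I have in mind (please correct me if this is off) is: write a generative model with a latent "explanation" variable, condition on a surprising observation, and let inference recover the posterior over explanations. Here's a minimal Pyro sketch of that idea; all the names, priors, and numbers are made up by me purely to illustrate:

import torch
import pyro
import pyro.distributions as dist
from pyro.infer import Importance, EmpiricalMarginal

# Toy generative model: a latent "explanation" (hypothesis) generates the observation.
def tiny_abduction_model(observation=None):
    # Prior over three candidate explanations
    explanation = pyro.sample("explanation", dist.Categorical(torch.tensor([0.5, 0.3, 0.2])))
    # Each explanation predicts the observation with a different mean
    means = torch.tensor([0.0, 1.0, 2.0])
    pyro.sample("obs", dist.Normal(means[explanation], 0.5), obs=observation)
    return explanation

# "Abduction" as inference: which explanation best accounts for the observed value 1.1?
posterior = Importance(tiny_abduction_model, num_samples=1000).run(torch.tensor(1.1))
marginal = EmpiricalMarginal(posterior, sites="explanation")
samples = torch.stack([marginal() for _ in range(1000)])
print(torch.bincount(samples, minlength=3).float() / len(samples))  # approximate posterior over explanations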

For fun, I asked ChatGPT to program Abductive Reasoning with Pyro, and it spat out something … fun! I've no experience with PyTorch or Pyro.

Sorry if this is the weirdest/most random/stupidest question ever.

Here's what ChatGPT printed out, if you're curious :joy:

import torch
import pyro
import pyro.distributions as dist
import pyro.nn as pnn

class Attention(pnn.PyroModule):
    def __init__(self, hidden_size):
        super().__init__()
        self.fc = pnn.PyroModule[torch.nn.Linear](hidden_size, 1)
    
    def forward(self, rnn_output):
        attention_weights = torch.nn.functional.softmax(self.fc(rnn_output).squeeze(-1), dim=1)
        context_vector = torch.sum(attention_weights.unsqueeze(-1) * rnn_output, dim=1)
        return context_vector

class AbductiveReasoningModel(pnn.PyroModule):
    def __init__(self, num_explanations, num_categories, rnn_hidden_size):
        super().__init__()
        self.num_explanations = num_explanations
        self.num_categories = num_categories
        self.rnn = pnn.PyroModule[torch.nn.RNN](input_size=1, hidden_size=rnn_hidden_size, batch_first=True)
        self.attention = Attention(rnn_hidden_size)
    
    def forward(self, observations):
        # Temporal reasoning using an RNN; reshape to (batch=1, seq_len, input_size=1) for batch_first
        rnn_output, _ = self.rnn(observations.view(1, -1, 1))
        context_vector = self.attention(rnn_output)
        
        # Hierarchical model: sample a category first
        category_probs = pyro.param("category_probs", torch.ones(self.num_categories) / self.num_categories, constraint=dist.constraints.simplex)
        category = pyro.sample("category", dist.Categorical(category_probs))
        
        # Explanation probabilities depend on the chosen category:
        # one row of a (num_categories, num_explanations) simplex-constrained matrix
        explanation_probs = pyro.param("explanation_probs", torch.ones(self.num_categories, self.num_explanations) / self.num_explanations, constraint=dist.constraints.simplex)
        
        # Sample an explanation based on the category
        explanation = pyro.sample("explanation", dist.Categorical(explanation_probs[category]))
        
        # Simple Causal Model: Introduce a latent variable that affects the observations
        latent_factor = pyro.sample("latent", dist.Normal(0, 1))
        causal_effect = context_vector + latent_factor
        
        # Placeholder: combine the latent factor, attention context, and explanation into one score.
        combined_score = causal_effect.sum() * explanation.float()
        
        return combined_score

# Placeholder data
observations = torch.tensor([0.1, 0.2, 0.3, 0.4])
num_explanations = 5
num_categories = 3
rnn_hidden_size = 10

model = AbductiveReasoningModel(num_explanations, num_categories, rnn_hidden_size)

# Placeholder guide for variational inference (takes the same arguments as the model)
def guide(observations):
    category_probs = pyro.param("category_probs", torch.ones(num_categories) / num_categories, constraint=dist.constraints.simplex)
    category = pyro.sample("category", dist.Categorical(category_probs))
    
    explanation_probs = pyro.param("explanation_probs", torch.ones(num_categories, num_explanations) / num_explanations, constraint=dist.constraints.simplex)
    explanation = pyro.sample("explanation", dist.Categorical(explanation_probs[category]))
    
    latent_factor = pyro.sample("latent", dist.Normal(0, 1))

# Placeholder loss and optimization
loss = pyro.infer.Trace_ELBO().differentiable_loss
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

# Human Feedback: Placeholder function to adjust the model based on feedback.
def adjust_model_based_on_feedback(feedback):
    # Simple placeholder. In a real-world scenario, feedback could be used to adjust
    # model parameters, retrain the model, or inform future inferences.
    if feedback == "increase_attention":
        for param in model.attention.parameters():
            param.data += 0.01
    elif feedback == "decrease_attention":
        for param in model.attention.parameters():
            param.data -= 0.01
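
And in case anyone wants to actually run it end to end, here's my guess at how the pieces would be wired together with Pyro's SVI helper, instead of the raw loss/optimizer placeholders above (this part is mine, not ChatGPT's, and the step count is arbitrary):

# My own sketch of a training loop using Pyro's SVI helper (not part of ChatGPT's output).
svi = pyro.infer.SVI(model, guide, pyro.optim.Adam({"lr": 0.01}), loss=pyro.infer.Trace_ELBO())

for step in range(1000):
    elbo = svi.step(observations)  # one gradient step on the ELBO

# Inspect the learned probabilities afterwards. Note: the model above never conditions
# on data via obs=..., so with this placeholder nothing meaningful is actually learned.
print(pyro.param("category_probs"))
print(pyro.param("explanation_probs"))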