How to calculate feature importances of a Bayesian neural network?

Hello everyone, I’m a new user of Pyro and have successfully implemented a Bayesian version of an Artificial Neural Network (ANN) with a notably high accuracy rate. Now, I’m eager to determine which features have the most significant impact on this model’s performance.

My question is: What’s the best approach to calculate the feature importance for this model? Does the Pyro library inherently provide support for this task, or should I consider using external tools such as the SHAP library or other explanation techniques?

Thank you in advance for your guidance!

Here is my code for the ANN:

class ANN(nn.Module):
    """Simple feed-forward network with LeakyReLU activations.

    Parameters
    ----------
    infeature : int
        Number of input features.
    hidden_feature : dict
        Ordered mapping ``name -> width`` describing each hidden layer
        (dicts preserve insertion order in Python 3.7+).
    outfeature : int
        Number of output features.
    """

    def __init__(self, infeature, hidden_feature, outfeature):
        super(ANN, self).__init__()
        # Validate with an explicit exception: `assert` is stripped under -O.
        if not isinstance(hidden_feature, dict):
            raise TypeError("hidden_feature must be a dict of layer widths")

        # Bug fix: the original read a global `keys` that was never defined.
        # Derive the layer order from hidden_feature and keep it for forward().
        keys = list(hidden_feature)
        self.keys = keys

        self.inlayer = nn.Sequential(
            nn.Linear(infeature, hidden_feature[keys[0]]),
            nn.LeakyReLU(),
        )
        # Register the remaining hidden layers by name (hidden_1, relu_1, ...).
        for i in range(1, len(keys)):
            self.add_module(
                "hidden_{}".format(i),
                nn.Linear(hidden_feature[keys[i - 1]], hidden_feature[keys[i]]),
            )
            self.add_module("relu_{}".format(i), nn.LeakyReLU())

        self.outlayer = nn.Linear(hidden_feature[keys[-1]], outfeature)

    def forward(self, x, y=None):
        """Run a forward pass; `y` is unused and kept for Pyro-style calls."""
        x = self.inlayer(x)
        # getattr replaces the original eval() lookup of named submodules.
        for i in range(1, len(self.keys)):
            x = getattr(self, "hidden_{}".format(i))(x)
            x = getattr(self, "relu_{}".format(i))(x)
        return self.outlayer(x)

My code for introducing distributions into the ANN:

def model(x, y):
    """Pyro model: N(0, 1) priors on every weight/bias of the global `net`,
    then condition the sampled network's output on the observations `y`.

    Relies on module-level globals defined elsewhere in the script:
    `net` (the ANN instance), `hidden_feature` (dict of hidden widths),
    `device`, `pyro`, and `Normal`.
    """
    # Build the prior dict directly instead of the original locals()/eval()
    # bookkeeping, which relied on CPython-specific locals() behavior.
    # NOTE(review): the prior keys must be the actual parameter names of
    # `net` (e.g. "inlayer.0.weight"), not arbitrary labels, for the lifted
    # module to pick them up.
    priors = {
        # input layer
        "inlayer.0.weight": Normal(0, 1).expand(net.inlayer[0].weight.shape).to_event(2),
        "inlayer.0.bias": Normal(0, 1).expand(net.inlayer[0].bias.shape).to_event(1),
    }
    # hidden layers
    for i in range(1, len(hidden_feature)):
        layer = getattr(net, "hidden_{}".format(i))
        priors["hidden_{}.weight".format(i)] = Normal(0, 1).expand(layer.weight.shape).to_event(2)
        priors["hidden_{}.bias".format(i)] = Normal(0, 1).expand(layer.bias.shape).to_event(1)
    # output layer
    priors["outlayer.weight"] = Normal(0, 1).expand(net.outlayer.weight.shape).to_event(2)
    priors["outlayer.bias"] = Normal(0, 1).expand(net.outlayer.bias.shape).to_event(1)

    # Bug fix: pyro.module() does not take a prior dict; lifting parameters
    # to random variables is done with pyro.random_module (deprecated in
    # favor of pyro.nn.PyroModule, but matching the original API style).
    lifted_module = pyro.random_module("module", net, priors)
    # sample a concrete regressor (this also samples all weights and biases)
    lifted_reg_model = lifted_module()
    prediction_mean = lifted_reg_model(x).squeeze()

    # Observe the data under unit observation noise; give the plate an
    # explicit size so the batch dimension is declared.
    with pyro.plate("data_plate", x.shape[0], device=device):
        pyro.sample("obs", Normal(prediction_mean, 1), obs=y)