# Import necessary libraries
import numpy as np
import pandas as pd
import streamlit as st
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from lime.lime_tabular import LimeTabularExplainer

# Load dataset
data = load_iris()
X = data.data
y = data.target

# Split dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42
)

# Train a Random Forest classifier
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_train, y_train)

# Create an explainer using LIME
explainer = LimeTabularExplainer(
    X_train,
    mode='classification',
    training_labels=y_train,
    feature_names=data.feature_names,
    class_names=data.target_names,
    discretize_continuous=True,
)

# Streamlit UI
st.title("Explainable AI with LIME")
st.write("This application demonstrates how to make AI models more interpretable using LIME.")

# User input for test instance index
idx = st.number_input(
    "Select a test instance index to explain",
    min_value=0,
    max_value=len(X_test) - 1,
    value=0,
)

# Choose a test instance to explain
instance = X_test[idx].reshape(1, -1)

# Get the explanation for the chosen instance
exp = explainer.explain_instance(instance[0], model.predict_proba)

# Display the explanation as (feature, weight) pairs
st.write(f"Explanation for instance {idx}:")
st.write(exp.as_list())
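
# Optional extension (a minimal sketch, not part of the original script): LIME's
# Explanation object also exposes as_pyplot_figure(), which returns a matplotlib
# figure of the feature weights that Streamlit can render with st.pyplot().
# This assumes matplotlib is installed in the environment.
fig = exp.as_pyplot_figure()
st.pyplot(fig)

# Showing the model's predicted class probabilities next to the explanation
# helps read the weights in the context of the prediction.
probs = model.predict_proba(instance)[0]
st.write({str(name): float(p) for name, p in zip(data.target_names, probs)})

# To launch the app, save this script (e.g. as app.py) and run:
#   streamlit run app.py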