# pandasai/mylab/tab1_works.py
import streamlit as st
import pandas as pd
import plotly.express as px
from pandasai import Agent
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.schema import Document
import os
import re
# Set title
st.title("Data Analyzer")
# API keys
api_key = os.getenv("OPENAI_API_KEY")
pandasai_api_key = os.getenv("PANDASAI_API_KEY")
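# Both keys are expected as environment variables (e.g. a local .env or the hosting platform's secrets)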
if not api_key or not pandasai_api_key:
st.warning("API keys for OpenAI or PandasAI are missing. Ensure both keys are set in environment variables.")
# Add session reset button
if st.button("Reset Session"):
for key in list(st.session_state.keys()):
del st.session_state[key]
    st.rerun()
# Function to validate and clean dataset
def validate_and_clean_dataset(df):
# Rename columns for consistency
df.columns = [col.strip().lower().replace(" ", "_") for col in df.columns]
# Check for missing values
if df.isnull().values.any():
st.warning("Dataset contains missing values. Consider cleaning the data.")
return df
# Function to load datasets into session
def load_dataset_into_session():
input_option = st.radio(
"Select Dataset Input:",
["Use Repo Directory Dataset", "Use Hugging Face Dataset", "Upload CSV File"],
)
# Option 1: Load dataset from the repo directory
if input_option == "Use Repo Directory Dataset":
file_path = "./source/test.csv"
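        # Assumes ./source/test.csv is present relative to the app's working directory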
if st.button("Load Dataset"):
try:
st.session_state.df = pd.read_csv(file_path)
st.session_state.df = validate_and_clean_dataset(st.session_state.df)
st.success(f"File loaded successfully from '{file_path}'!")
except Exception as e:
st.error(f"Error loading dataset from the repo directory: {e}")
# Option 2: Load dataset from Hugging Face
elif input_option == "Use Hugging Face Dataset":
dataset_name = st.text_input(
"Enter Hugging Face Dataset Name:", value="HUPD/hupd"
)
if st.button("Load Hugging Face Dataset"):
try:
from datasets import load_dataset
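                # Note: some datasets (e.g. HUPD/hupd) may also need a config name, passed as the second argument to load_dataset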
dataset = load_dataset(dataset_name, split="train", trust_remote_code=True)
if hasattr(dataset, "to_pandas"):
st.session_state.df = dataset.to_pandas()
else:
st.session_state.df = pd.DataFrame(dataset)
st.session_state.df = validate_and_clean_dataset(st.session_state.df)
st.success(f"Hugging Face Dataset '{dataset_name}' loaded successfully!")
except Exception as e:
st.error(f"Error loading Hugging Face dataset: {e}")
# Option 3: Upload CSV File
elif input_option == "Upload CSV File":
uploaded_file = st.file_uploader("Upload a CSV File:", type=["csv"])
if uploaded_file:
try:
st.session_state.df = pd.read_csv(uploaded_file)
st.session_state.df = validate_and_clean_dataset(st.session_state.df)
st.success("File uploaded successfully!")
except Exception as e:
st.error(f"Error reading uploaded file: {e}")
load_dataset_into_session()
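# st.session_state keeps the loaded dataframe across the reruns Streamlit triggers on each widget interaction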
# Check if the dataset and API keys are loaded
if "df" in st.session_state and api_key and pandasai_api_key:
# Set API keys
os.environ["OPENAI_API_KEY"] = api_key
os.environ["PANDASAI_API_KEY"] = pandasai_api_key
df = st.session_state.df
st.write("Dataset Preview:")
st.write(df.head()) # Ensure the dataset preview is displayed only once
# Set up PandasAI Agent
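    # The Agent answers natural-language questions by generating and running pandas code against the dataframe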
try:
agent = Agent(df)
st.info("PandasAI Agent initialized successfully.")
except Exception as e:
st.error(f"Error initializing PandasAI Agent: {str(e)}")
# Convert dataframe into documents
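    # One Document per dataframe row; large datasets may warrant sampling or truncation before embedding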
try:
documents = [
Document(
page_content=", ".join([f"{col}: {row[col]}" for col in df.columns]),
metadata={"index": index}
)
for index, row in df.iterrows()
]
st.info("Documents created successfully for RAG.")
except Exception as e:
st.error(f"Error creating documents for RAG: {str(e)}")
# Set up RAG
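    # Embeds each row document with OpenAI embeddings and builds an in-memory FAISS index for retrieval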
try:
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_documents(documents, embeddings)
retriever = vectorstore.as_retriever()
qa_chain = RetrievalQA.from_chain_type(
llm=ChatOpenAI(),
chain_type="stuff",
retriever=retriever
)
st.info("RAG setup completed successfully.")
except Exception as e:
st.error(f"Error setting up RAG: {str(e)}")
# Create tabs
tab1, tab2, tab3 = st.tabs(["PandasAI Analysis", "RAG Q&A", "Data Visualization"])
with tab1:
st.subheader("Data Analysis with PandasAI")
pandas_question = st.text_input("Ask a question about the dataset (PandasAI):")
if pandas_question:
try:
result = agent.chat(pandas_question)
st.write("PandasAI Answer:", result)
if hasattr(agent, "last_output"):
st.write("PandasAI Intermediate Output:", agent.last_output)
except Exception as e:
st.error(f"PandasAI encountered an error: {str(e)}")
                # Fallback: direct pandas lookup when the query mentions a patent number and a decision
                if "patent" in pandas_question.lower() and "decision" in pandas_question.lower():
                    try:
                        match = re.search(r'\d{7,}', pandas_question)
                        if match:
                            patent_number = match.group()
                            # Compare as strings so the lookup works whether the column holds ints or strings
                            decision = df.loc[df['patent_number'].astype(str) == patent_number, 'decision']
                            if not decision.empty:
                                st.write(f"Fallback Answer: The decision for patent {patent_number} is '{decision.iloc[0]}'.")
                            else:
                                st.write(f"No record found for patent number {patent_number}.")
                        else:
                            st.write("Could not extract a patent number from the query.")
                    except Exception as fallback_error:
                        st.error(f"Fallback processing failed: {fallback_error}")
with tab2:
st.subheader("Q&A with RAG")
rag_question = st.text_input("Ask a question about the dataset (RAG):")
if rag_question:
try:
                result = qa_chain.invoke({"query": rag_question})["result"]
st.write("RAG Answer:", result)
except Exception as e:
st.error(f"RAG encountered an error: {str(e)}")
with tab3:
st.subheader("Data Visualization")
viz_question = st.text_input("What kind of graph would you like? (e.g., 'Show a scatter plot of salary vs experience')")
if viz_question:
try:
                result = agent.chat(viz_question)
                # agent.chat may return a non-string (e.g. a chart object or a file path), so coerce before matching
                code_pattern = r'```python\n(.*?)\n```'
                code_match = re.search(code_pattern, str(result), re.DOTALL)
                if code_match:
                    viz_code = code_match.group(1)
                    # Caution: this executes model-generated code with access to the module's globals (df, px, st, ...)
                    exec(viz_code)
                else:
                    st.write("Unable to generate the graph. Showing a fallback example.")
                    fig = px.scatter(df, x=df.columns[0], y=df.columns[1])
                    st.plotly_chart(fig)
except Exception as e:
st.error(f"An error occurred during visualization: {str(e)}")
else:
if not api_key:
st.warning("Please set the OpenAI API key in environment variables.")
if not pandasai_api_key:
st.warning("Please set the PandasAI API key in environment variables.")