# docqa-with-deepseek-r1 / lab/persistence_issue_persists_v1.py
import os
import chromadb
import requests
import streamlit as st
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_groq import ChatGroq
from langchain.document_loaders import PDFPlumberLoader
from langchain_experimental.text_splitter import SemanticChunker
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from prompts import rag_prompt, relevancy_prompt, relevant_context_picker_prompt, response_synth
# Set API Keys
os.environ["GROQ_API_KEY"] = st.secrets.get("GROQ_API_KEY", "")
# Load LLM models
llm_judge = ChatGroq(model="deepseek-r1-distill-llama-70b")
rag_llm = ChatGroq(model="mixtral-8x7b-32768")
llm_judge.verbose = True
rag_llm.verbose = True
# Clear ChromaDB cache to fix tenant issue
chromadb.api.client.SharedSystemClient.clear_system_cache()
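# Build the embedding model once and cache it across Streamlit reruns — a minimal
# sketch, assuming st.cache_resource is available (Streamlit >= 1.18). Caching also
# lets Step 4 reuse the model on reruns where the chunking step is skipped.
@st.cache_resource
def get_embedding_model():
    return HuggingFaceEmbeddings(
        model_name="nomic-ai/modernbert-embed-base",
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": False},
    )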
st.title("Blah")
# **Initialize session state variables**
if "pdf_path" not in st.session_state:
st.session_state.pdf_path = None
if "pdf_loaded" not in st.session_state:
st.session_state.pdf_loaded = False
if "chunked" not in st.session_state:
st.session_state.chunked = False
if "vector_created" not in st.session_state:
st.session_state.vector_created = False
if "vector_store_path" not in st.session_state:
st.session_state.vector_store_path = "./chroma_langchain_db"
if "vector_store" not in st.session_state:
st.session_state.vector_store = None
if "documents" not in st.session_state:
st.session_state.documents = None
# Step 1: Choose PDF Source
pdf_source = st.radio("Upload or provide a link to a PDF:", ["Upload a PDF file", "Enter a PDF URL"], index=0, horizontal=True)
if pdf_source == "Upload a PDF file":
uploaded_file = st.file_uploader("Upload your PDF file", type="pdf")
if uploaded_file:
st.session_state.pdf_path = "temp.pdf"
with open(st.session_state.pdf_path, "wb") as f:
f.write(uploaded_file.getbuffer())
st.session_state.pdf_loaded = False
st.session_state.chunked = False
st.session_state.vector_created = False
elif pdf_source == "Enter a PDF URL":
    pdf_url = st.text_input("Enter PDF URL:", value="https://arxiv.org/pdf/2406.06998")
    if pdf_url and st.session_state.pdf_path is None:
        with st.spinner("Downloading PDF..."):
            try:
                # A timeout keeps a stalled download from hanging the app indefinitely
                response = requests.get(pdf_url, timeout=30)
                if response.status_code == 200:
                    st.session_state.pdf_path = "temp.pdf"
                    with open(st.session_state.pdf_path, "wb") as f:
                        f.write(response.content)
                    st.session_state.pdf_loaded = False
                    st.session_state.chunked = False
                    st.session_state.vector_created = False
                    st.success("✅ PDF Downloaded Successfully!")
                else:
                    st.error("❌ Failed to download PDF. Check the URL.")
            except Exception as e:
                st.error(f"Error downloading PDF: {e}")
# Step 2: Process PDF
if st.session_state.pdf_path and not st.session_state.pdf_loaded:
    with st.spinner("Loading and processing PDF..."):
        loader = PDFPlumberLoader(st.session_state.pdf_path)
        docs = loader.load()
        st.session_state.documents = docs
        st.session_state.pdf_loaded = True
        st.success(f"✅ **PDF Loaded!** Total Pages: {len(docs)}")
# Step 3: Chunking
if st.session_state.pdf_loaded and not st.session_state.chunked and st.session_state.documents:
    with st.spinner("Chunking the document..."):
        # Reuse the cached embedding model defined above
        embedding_model = get_embedding_model()
        text_splitter = SemanticChunker(embedding_model)
        documents = text_splitter.split_documents(st.session_state.documents)
        st.session_state.documents = documents  # Store chunked docs
        st.session_state.chunked = True
        st.success(f"✅ **Document Chunked!** Total Chunks: {len(documents)}")
# Step 4: Setup Vectorstore
if st.session_state.chunked and not st.session_state.vector_created:
    with st.spinner("Creating vector store..."):
        vector_store = Chroma(
            collection_name="deepseek_collection",
            collection_metadata={"hnsw:space": "cosine"},
            # Cached model, so this step still works on reruns where Step 3 is skipped
            embedding_function=get_embedding_model(),
            persist_directory=st.session_state.vector_store_path
        )
        vector_store.add_documents(st.session_state.documents)
        num_documents = len(vector_store.get()["documents"])
        st.session_state.vector_store = vector_store
        st.session_state.vector_created = True
        st.success(f"✅ **Vector Store Created!** Total documents stored: {num_documents}")
# Step 5: Query Input
if st.session_state.vector_created and st.session_state.vector_store:
    query = st.text_input("🔍 Enter a Query:")
    if query:
        with st.spinner("Retrieving relevant contexts..."):
            retriever = st.session_state.vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 5})
            contexts = retriever.invoke(query)
            context_texts = [doc.page_content for doc in contexts]
        st.success(f"✅ **Retrieved {len(context_texts)} Contexts!**")
        for i, text in enumerate(context_texts, 1):
            st.write(f"**Context {i}:** {text[:500]}...")
        # **Step 6: Generate Final Response**
        with st.spinner("Generating the final answer..."):
            final_prompt = PromptTemplate(input_variables=["query", "context"], template=rag_prompt)
            response_chain = LLMChain(llm=rag_llm, prompt=final_prompt, output_key="final_response")
            # Join retrieved chunks into one context string instead of passing a raw list
            final_response = response_chain.invoke({"query": query, "context": "\n\n".join(context_texts)})
        st.subheader("🟥 RAG Final Response")
        st.success(final_response['final_response'])
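# Usage (assumes the companion `prompts` module and a GROQ_API_KEY secret are in place):
#   streamlit run lab/persistence_issue_persists_v1.py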