TejaCherukuri committed
Commit 59e5e32 · 1 Parent(s): abac36c

add source code
.gitignore ADDED
@@ -0,0 +1,2 @@
+ *venv
+ /faiss_index
app.py ADDED
@@ -0,0 +1,62 @@
+ import streamlit as st
+ from src.pdf_processing import extract_pdf_text, split_text_into_chunks
+ from src.vector_store import create_and_save_vector_store
+ from src.query_handler import handle_user_query
+
+ # Initialize session state for chat history
+ def initialize_session_state():
+     if 'messages' not in st.session_state:
+         st.session_state.messages = []
+
+ def main():
+     """
+     Main function to run the Streamlit app.
+     """
+     st.set_page_config(page_title="DocuChat")  # must be the first Streamlit command
+     initialize_session_state()
+
+     st.header("DocuChat: Chat with your Document")
+     st.markdown("Source code available at [[GitHub]](https://github.com/TejaCherukuri/DocuChat)")
+
+     # Display previous chat messages
+     for message in st.session_state.messages:
+         with st.chat_message(message["role"]):
+             st.write(message["content"])
+
+     # Chat input for user questions
+     if prompt := st.chat_input("Ask a question about your document"):
+         st.session_state.messages.append({"role": "user", "content": prompt})
+
+         with st.chat_message("user"):
+             st.write(prompt)
+
+         with st.chat_message("assistant"):
+             with st.spinner("Thinking..."):
+                 try:
+                     response = handle_user_query(prompt)
+                     st.write(response)
+
+                     # Save assistant's response
+                     st.session_state.messages.append({"role": "assistant", "content": response})
+                 except Exception as e:
+                     st.error(f"Error generating response: {str(e)}")
+
+     # Sidebar for PDF Upload
+     with st.sidebar:
+         st.title("Upload PDF 📂")
+         st.write("*This is for demonstration purposes. Do not submit any proprietary documents.*")
+         pdf_docs = st.file_uploader("Upload your PDF Files", accept_multiple_files=True)
+
+         if st.button("Process"):
+             if not pdf_docs:
+                 st.error("Upload a PDF to start!")
+                 return
+
+             with st.spinner("Processing, Chunking, and Caching..."):
+                 raw_text = extract_pdf_text(pdf_docs)
+                 text_chunks = split_text_into_chunks(raw_text)
+                 create_and_save_vector_store(text_chunks)
+                 st.success("Processing Done ✅")
+
+ if __name__ == "__main__":
+     main()
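For reference, the same pipeline can be exercised without the Streamlit UI. A minimal headless sketch, assuming the requirements are installed, GEMINI_API_KEY is exported, and a local sample.pdf exists (the file name is a placeholder):

# Headless walkthrough of app.py's flow (sketch; "sample.pdf" is hypothetical).
from src.pdf_processing import extract_pdf_text, split_text_into_chunks
from src.vector_store import create_and_save_vector_store
from src.query_handler import handle_user_query

with open("sample.pdf", "rb") as f:
    chunks = split_text_into_chunks(extract_pdf_text([f]))

create_and_save_vector_store(chunks)  # writes ./faiss_index to disk
print(handle_user_query("What is this document about?"))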
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ streamlit
+ langchain
+ langchain-text-splitters
+ langchain-community
+ langchain-huggingface
+ faiss-cpu
+ PyPDF2
+ sentence-transformers
+ google-generativeai
src/__pycache__/pdf_processing.cpython-310.pyc ADDED
Binary file (914 Bytes).
 
src/__pycache__/prompt_template.cpython-310.pyc ADDED
Binary file (863 Bytes).
 
src/__pycache__/query_handler.cpython-310.pyc ADDED
Binary file (1.12 kB).
 
src/__pycache__/vector_store.cpython-310.pyc ADDED
Binary file (951 Bytes).
 
src/config.py ADDED
@@ -0,0 +1,5 @@
+ import os
+ import google.generativeai as genai
+
+ # Load API Key for Gemini
+ genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
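Note that genai.configure only runs when this module is imported (query_handler.py imports it for exactly that reason). A quick pre-flight check can catch a missing key before the first Gemini call fails; a minimal sketch:

# Fail fast if the Gemini key is missing (sketch).
import os

if not os.getenv("GEMINI_API_KEY"):
    raise RuntimeError("GEMINI_API_KEY is not set; export it before launching the app.")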
src/pdf_processing.py ADDED
@@ -0,0 +1,20 @@
+ from PyPDF2 import PdfReader
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+
+ def extract_pdf_text(pdf_docs):
+     """
+     Extracts text from a list of uploaded PDF files.
+     """
+     text = ""
+     for pdf in pdf_docs:
+         pdf_reader = PdfReader(pdf)
+         for page in pdf_reader.pages:
+             text += page.extract_text() or ""  # extract_text() may return None for image-only pages
+     return text
+
+ def split_text_into_chunks(text, chunk_size=10000, chunk_overlap=500):
+     """
+     Splits extracted text into smaller chunks for better processing.
+     """
+     text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
+     return text_splitter.split_text(text)
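To sanity-check the chunking parameters (chunk_size=10000 and chunk_overlap=500, both measured in characters), the splitter can be run on plain text with no PDF involved; a small sketch:

from src.pdf_processing import split_text_into_chunks

text = "DocuChat indexes PDF text. " * 2000  # roughly 54,000 characters of dummy text
chunks = split_text_into_chunks(text)
print(f"{len(chunks)} chunks; first chunk is {len(chunks[0])} characters long")
# Consecutive chunks share up to 500 characters so context survives the split.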
src/prompt_template.py ADDED
@@ -0,0 +1,19 @@
+ from langchain.prompts import PromptTemplate
+
+ def create_prompt_template():
+     """
+     Creates a structured prompt for querying the Gemini model.
+     """
+     prompt_template = """
+     Answer the question as detailed as possible from the provided context.
+     If the answer contains structured data like tables or lists, respond in the same format.
+     If the answer is not in the provided context, say, "The answer is not available in the context."
+
+     Context:
+     {context}
+
+     Question:
+     {question}
+     """
+
+     return PromptTemplate(template=prompt_template, input_variables=['context', 'question'])
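To see the exact string handed to the model, the template can be formatted with toy values; a quick sketch:

from src.prompt_template import create_prompt_template

prompt = create_prompt_template()
print(prompt.format(
    context="DocuChat chunks PDFs and stores embeddings in a local FAISS index.",
    question="Where are the embeddings stored?",
))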
src/query_handler.py ADDED
@@ -0,0 +1,24 @@
+ import google.generativeai as genai
+ import src.config  # noqa: F401 (importing runs genai.configure with GEMINI_API_KEY)
+ from src.vector_store import load_vector_store
+ from src.prompt_template import create_prompt_template
+
+ def handle_user_query(user_question, index_name="faiss_index"):
+     """
+     Searches for relevant text in the vector store and generates a response using Gemini.
+     """
+     vector_store = load_vector_store(index_name)
+     docs = vector_store.similarity_search(user_question)
+
+     # Combine relevant document contents
+     context = "\n\n".join([doc.page_content for doc in docs])
+
+     # Format the prompt
+     prompt = create_prompt_template()
+     formatted_prompt = prompt.format(context=context, question=user_question)
+
+     # Generate response using Gemini AI
+     model = genai.GenerativeModel("gemini-1.5-flash")
+     response = model.generate_content(formatted_prompt)
+
+     return response.text if response.text else "No response generated."
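Once an index exists on disk, the handler can be called directly from a Python shell; a sketch, assuming create_and_save_vector_store has already run and GEMINI_API_KEY is set:

from src.query_handler import handle_user_query

# Looks up the most similar chunks in ./faiss_index, then asks Gemini.
answer = handle_user_query("Summarize the key points of the document.")
print(answer)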
src/vector_store.py ADDED
@@ -0,0 +1,18 @@
+ from langchain_huggingface import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import FAISS
+
+ # Load Hugging Face embeddings model
+ embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
+
+ def create_and_save_vector_store(text_chunks, index_name="faiss_index"):
+     """
+     Creates a FAISS vector store and saves it locally.
+     """
+     vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
+     vector_store.save_local(index_name)
+
+ def load_vector_store(index_name="faiss_index"):
+     """
+     Loads the FAISS vector store.
+     """
+     return FAISS.load_local(index_name, embeddings, allow_dangerous_deserialization=True)
+ return FAISS.load_local(index_name, embeddings, allow_dangerous_deserialization=True)