vineethn committed · verified
Commit f7e26c6 · Parent(s): 2269f15

Create app.py

Files changed (1): app.py +61 -0
app.py ADDED
@@ -0,0 +1,61 @@
+ import streamlit as st
+ from langchain_groq import ChatGroq
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain.chains import RetrievalQA
+
+ def main():
+     st.title("PDF Chat with Groq LLM")
+
+     # File uploader
+     uploaded_file = st.file_uploader("Upload a PDF", type="pdf")
+
+     if uploaded_file is not None:
+         # Save the uploaded PDF temporarily so PyPDFLoader can read it from disk
+         with open("temp.pdf", "wb") as f:
+             f.write(uploaded_file.getbuffer())
+
+         # Load the PDF (one Document per page)
+         loader = PyPDFLoader("temp.pdf")
+         pages = loader.load()
+
+         # Split the text into overlapping chunks for retrieval
+         text_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=1000,
+             chunk_overlap=200
+         )
+         texts = text_splitter.split_documents(pages)
+
+         # Create embeddings
+         embeddings = HuggingFaceEmbeddings(
+             model_name="sentence-transformers/all-MiniLM-L6-v2"
+         )
+
+         # Create vector store
+         vectorstore = FAISS.from_documents(texts, embeddings)
+
+         # Initialize Groq LLM (reads the GROQ_API_KEY environment variable)
+         llm = ChatGroq(
+             temperature=0.7,
+             model_name="llama3-70b-8192"
+         )
+
+         # Create QA chain that stuffs the top-3 retrieved chunks into the prompt
+         qa_chain = RetrievalQA.from_chain_type(
+             llm=llm,
+             chain_type="stuff",
+             retriever=vectorstore.as_retriever(search_kwargs={"k": 3})
+         )
+
+         # Chat input
+         query = st.text_input("Ask a question about the PDF:")
+
+         if query:
+             # RetrievalQA returns a dict with "query" and "result" keys
+             response = qa_chain.invoke({"query": query})
+             st.write("Response:", response["result"])
+
+ if __name__ == "__main__":
+     main()
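
Note that ChatGroq reads its credentials from the GROQ_API_KEY environment variable (on Hugging Face Spaces this is typically configured as a repository secret), so the app as committed fails only at the first query if the key is missing. A minimal sketch of an early guard; the message and placement are illustrative, not part of the committed file:

import os

import streamlit as st

# Hypothetical guard, not in the commit: stop the app with a readable
# message instead of failing inside ChatGroq when the key is absent.
if not os.environ.get("GROQ_API_KEY"):
    st.error("GROQ_API_KEY is not set. Add it as a Space secret and restart.")
    st.stop()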
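
Because Streamlit reruns the whole script on every widget interaction, this version re-reads, re-splits, re-embeds, and re-indexes the PDF for every question. A minimal sketch of one way to avoid that with st.cache_resource (assumes Streamlit >= 1.18); build_vectorstore is a hypothetical helper, not part of the committed file:

import streamlit as st
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Hypothetical helper: cache the heavy indexing work across Streamlit reruns.
@st.cache_resource(show_spinner="Indexing PDF...")
def build_vectorstore(pdf_path: str):
    pages = PyPDFLoader(pdf_path).load()
    chunks = RecursiveCharacterTextSplitter(
        chunk_size=1000, chunk_overlap=200
    ).split_documents(pages)
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    return FAISS.from_documents(chunks, embeddings)

One caveat with this sketch: caching on the fixed path "temp.pdf" would serve a stale index when a different PDF is uploaded, so in practice the cache key should also include something derived from the file contents, for example a hash of uploaded_file.getbuffer().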