# chat_with_pdf / app.py
# importing libraries
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from langchain_google_genai import GoogleGenerativeAIEmbeddings
import google.generativeai as genai
from langchain.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv
import base64
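# Assumed runtime dependencies (inferred from the imports above, not pinned in this file):
# streamlit, PyPDF2, langchain, langchain-google-genai, google-generativeai,
# faiss-cpu, python-dotenv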
load_dotenv()
# configure the Gemini client with the API key from the environment
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
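# Optional guard: stop early with a clear message in the UI if GOOGLE_API_KEY is
# missing; without it the Google client would only fail later with a less obvious error.
if not os.getenv("GOOGLE_API_KEY"):
    st.error("GOOGLE_API_KEY is not set. Add it to a .env file or your environment.")
    st.stop()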
# read the uploaded PDFs and concatenate their pages into one raw text string
def get_pdf_text(pdf_docs):
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() can return None (e.g. for scanned pages), so guard against it
            text += page.extract_text() or ""
    return text
# split the raw text into overlapping chunks
def get_text_chunks(text):
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    chunks = text_splitter.split_text(text)
    return chunks
# create embeddings for each chunk and store them in a local FAISS vector index
def get_vector_store(text_chunks):
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    # persist the index to ./faiss_index so user_input() can reload it later
    vector_store.save_local("faiss_index")
# define the question-answering chain
def get_conversational_chain():
    prompt_template = """
    Answer the question as detailed as possible from the provided context and make sure to include all the details.
    If the answer is not in the provided context, just say "answer is not available in the context"; don't provide a wrong answer.\n\n
    Context:\n {context}\n
    Question: \n{question}\n

    Answer:
    """
    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    # "stuff" chain: all retrieved documents are stuffed into a single prompt
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    return chain
# answer a user question: retrieve relevant chunks from the index and run the chain
def user_input(user_question):
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    # reload the persisted index; newer LangChain releases may also require
    # allow_dangerous_deserialization=True here
    new_db = FAISS.load_local("faiss_index", embeddings)
    docs = new_db.similarity_search(user_question)
    chain = get_conversational_chain()
    response = chain(
        {"input_documents": docs, "question": user_question},
        return_only_outputs=True)
    print(response)
    st.write("Reply: ", response["output_text"])
# Streamlit interface
def main():
    # titleimg = "wp2856135.gif"
    # # implement background formatting
    # def set_bg_hack(main_bg):
    #     # set bg name
    #     main_bg_ext = "gif"
    #     st.markdown(
    #         f"""
    #         <style>
    #         .stApp {{
    #             background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, "rb").read()).decode()});
    #             background-repeat: no-repeat;
    #             background-position: right 50% bottom 95%;
    #             background-size: cover;
    #             background-attachment: scroll;
    #         }}
    #         </style>
    #         """,
    #         unsafe_allow_html=True,
    #     )
    # set_bg_hack(titleimg)
    # st.set_page_config("Chat PDF")
    st.header("Chat with PDF 💁")
    user_question = st.text_input("Ask a Question from the PDF Files")
    if user_question:
        user_input(user_question)

    with st.sidebar:
        st.title("Menu:")
        pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
        if st.button("Submit & Process"):
            with st.spinner("Processing..."):
                raw_text = get_pdf_text(pdf_docs)
                text_chunks = get_text_chunks(raw_text)
                get_vector_store(text_chunks)
                st.success("Done")
if __name__ == "__main__":
    main()
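# Typical local usage (assuming the dependencies listed above are installed and a
# .env file provides GOOGLE_API_KEY):
#   streamlit run app.py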