OpenRAG128 committed on
Commit
3a709b3
·
verified ·
1 Parent(s): 9391df8

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -0
app.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
from langchain.prompts import ChatPromptTemplate
from langchain.schema import prompt
from langchain.schema.runnable import RunnablePassthrough
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_groq import ChatGroq
# Groq API key, read from the environment rather than hard-coded.
# NOTE(review): the original committed a literal API key here — that key is
# now public and must be revoked; secrets must never live in source control.
# Empty-string default keeps module import working even when the variable
# is unset (the ChatGroq call will then fail with a clear auth error).
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
# Load the scripture PDF and cut it into overlapping chunks so the
# retriever can return passages small enough to fit in the prompt.
pdf_documents = PyPDFLoader("Bhagavad-Gita.pdf").load()

splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
chunks = splitter.split_documents(pdf_documents)

# Keep only the raw text of each chunk; FAISS.from_texts expects plain strings.
texts = [chunk.page_content for chunk in chunks]
# Sentence-transformer embedding model, pinned to CPU for portability.
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2",
    model_kwargs={"device": "cpu"},
)
# System template grounding the model in the retrieved passages. The two
# placeholders are filled at runtime: {context} with retrieved chunks and
# {question} with the user's query.
_GITA_TEMPLATE = """You are an AI trained on Bhagvad Geeta, a sacred Hindu scripture. You provide readings from the text and offer wisdom and guidance based on its teachings.
Your responses should reflect the spiritual and philosophical nature of the Bhagvad Gita, offering deep insights into life's questions.
When asked a question, reference specific verses when appropriate and explain their relevance to the query.
Given below is the context and question of the user,
context = {context}
question = {question}
"""

prompt = ChatPromptTemplate.from_template(_GITA_TEMPLATE)
# Index the chunk texts in FAISS and expose the index as a retriever.
vector_store = FAISS.from_texts(texts, embedding=embeddings)
retriever = vector_store.as_retriever()

# Groq-hosted Llama 3 chat model used to answer the question.
llm = ChatGroq(model="llama3-8b-8192", groq_api_key=GROQ_API_KEY)

# LCEL pipeline: fetch context for the question, render the prompt, call the LLM.
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()} | prompt | llm
)
def demo(name):
    """Answer one user question via the RAG chain.

    `name` is the raw question text from the Gradio textbox (the parameter
    name is kept for the existing Gradio wiring). Returns the model's reply
    as plain text.
    """
    reply = rag_chain.invoke(name)
    return reply.content
# Build and serve the web UI. Rebinding `demo` shadows the handler function
# above; this is safe because gr.Interface already holds the function reference.
demo = gr.Interface(
    fn=demo,
    inputs="textbox",
    outputs="textbox",
    title="Fidem.AI",
)
demo.launch(share=True)