Update app.py
app.py CHANGED
@@ -4,11 +4,12 @@ from dotenv import load_dotenv
 import time
 from langchain.vectorstores import Chroma
 from langchain.embeddings import HuggingFaceEmbeddings
-from langchain_core.prompts import ChatPromptTemplate
+from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
 from langchain_groq import ChatGroq
 from langchain.chains import RetrievalQA
 from langchain.document_loaders import PyPDFLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.chains import LLMChain
 
 # Set persistent storage path
 PERSISTENT_DIR = "vector_db"
@@ -124,7 +125,7 @@ print(f"Loading and processing PDFs & vector database took {end_time - start_tim
 start_time = time.time()
 retriever = vector_db.as_retriever()
 
-prompt_template = """Use the following pieces of context to answer the question at the end.
+prompt_template = """Use the following pieces of context to answer the question at the end. If the context doesn't provide enough information to answer the question, use your existing knowledge as an Agro-Homeopathy expert to provide the best possible answer.
 
 {context}
 
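The unchanged `)` / `}` / `)` lines at the top of the next hunk are the tail of the `RetrievalQA.from_chain_type(...)` call where `prompt_template` gets attached to the chain. That call itself sits outside the diff context, so the following is only a sketch of what it plausibly looks like, assuming a stuff-type chain; `llm` (the ChatGroq model) and `retriever` are created earlier in app.py:

    # Hypothetical reconstruction of the call whose closing lines appear in the hunk below
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=retriever,
        chain_type="stuff",  # assumption: retrieved chunks are stuffed into a single prompt
        chain_type_kwargs={
            "prompt": PromptTemplate(
                template=prompt_template,                 # the template edited above
                input_variables=["context", "question"],  # filled in per query
            )
        }
    )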
@@ -143,7 +144,16 @@ qa = RetrievalQA.from_chain_type(
         )
     }
 )
-
+
+# Create a separate LLMChain for fallback
+fallback_template = """You are an expert Agro-Homeopathy doctor. Answer the following question to the best of your ability:
+
+Question: {question}
+
+Answer:"""
+
+fallback_prompt = PromptTemplate(template=fallback_template, input_variables=["question"])
+fallback_chain = LLMChain(llm=llm, prompt=fallback_prompt)
 
 chat_container = st.container()
 
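The new fallback chain is a bare prompt-to-LLM pipe with no retrieval step, so it answers purely from the model's own knowledge. On its own it would behave roughly like this (illustrative query, not from app.py; with a single input variable, `LLMChain.run` accepts the value positionally and returns the completion as a string):

    # Bypasses the vector store entirely; only the fallback prompt and the Groq model run
    answer = fallback_chain.run("Which remedy is suggested for powdery mildew on roses?")
    print(answer)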
@@ -158,10 +168,12 @@ start_time = time.time()
 if submit_button and query:
     with st.spinner("Generating response..."):
         result = qa({"query": query})
-
-
-
-
+        if result['result'].strip() == "":
+            # If no result from PDF, use fallback chain
+            fallback_result = fallback_chain.run(query)
+            response = fallback_result
+        else:
+            response = result['result']
 
     col1, col2 = st.columns([1, 10])
     with col1:
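One caveat on the routing above: `result['result'].strip() == ""` only fires on a literally empty completion, while chat models more often decline with an explicit refusal sentence when the retrieved context is thin. A sketch of a broader check, if that case matters here (the refusal markers are illustrative, not taken from app.py):

    def needs_fallback(answer: str) -> bool:
        # Route to the fallback chain on empty output or a stock refusal phrase
        normalized = answer.strip().lower()
        markers = ("i don't know", "i do not know", "not enough information")
        return normalized == "" or any(m in normalized for m in markers)

    if needs_fallback(result["result"]):
        response = fallback_chain.run(query)
    else:
        response = result["result"]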
@@ -179,4 +191,4 @@ if submit_button and query:
         st.session_state["query"] = ""
 
 end_time = time.time()
-print(f"Actual query took {end_time - start_time:.4f} seconds")
+print(f"Actual query took {end_time - start_time:.4f} seconds")