Update app.py

app.py  CHANGED
@@ -1,5 +1,7 @@
 import streamlit as st
 import os
+from langchain.memory import ConversationBufferMemory
+import uuid
 from dotenv import load_dotenv
 import time
 from langchain.vectorstores import Chroma

@@ -42,8 +44,16 @@ st.markdown("""
         background-color: rgba(255, 255, 255, 0.05) !important;
     }

-    /*
-    .stMarkdown, .stText, .stTitle, .stHeader, .stSubheader
+    /* Style all text elements in white */
+    .stMarkdown, .stText, .stTitle, .stHeader, .stSubheader,
+    .stTextInput label, .stSelectbox label, .st-emotion-cache-10trblm,
+    .st-emotion-cache-1a7jz76, .st-emotion-cache-1629p8f,
+    [data-testid="stTitle"], [data-testid="stSubheader"] {
+        color: white !important;
+    }
+
+    /* Additional specific selectors for title and subheader */
+    h1, h2, h3 {
         color: white !important;
     }

@@ -164,7 +174,13 @@ if "vector_db" not in st.session_state:
     st.session_state["vector_db"] = None
 if "query" not in st.session_state:
     st.session_state["query"] = ""
-
+if "session_id" not in st.session_state:
+    st.session_state.session_id = str(uuid.uuid4())
+if "conversation_memory" not in st.session_state:
+    st.session_state.conversation_memory = ConversationBufferMemory(
+        memory_key="chat_history",
+        return_messages=True
+    )
 start_time = time.time()
 if st.session_state["documents"] is None or st.session_state["vector_db"] is None:
     with st.spinner("Loading data..."):

@@ -183,6 +199,7 @@ retriever = vector_db.as_retriever()

 prompt_template = """As an expert organic farming consultant with specialization in Agro-Homeopathy, analyze the following context and question to provide a clear, structured response.
 Context: {context}
+Previous conversation:{chat_history}
 Question: {question}
 Provide your response in the following format:
 Analysis: Analyze the described plant condition

@@ -217,16 +234,18 @@ qa = RetrievalQA.from_chain_type(
     llm=llm,
     chain_type="stuff",
     retriever=retriever,
+    memory=st.session_state.conversation_memory,
     chain_type_kwargs={
         "prompt": PromptTemplate(
             template=prompt_template,
-            input_variables=["context", "question"]
+            input_variables=["context", "question", "chat_history"]
         )
     }
 )

 # Create a separate LLMChain for fallback
 fallback_template = """As an expert organic farming consultant with specialization in Agro-Homeopathy, analyze the following context and question to provide a clear, structured response.
+Previous conversation:{chat_history}
 Question: {question}
 Format your response as follows:
 Analysis: Analyze the described plant condition

@@ -256,8 +275,38 @@ Recommendations: Provide couple of key pertinent recommendations based on the qu
 Maintain a professional tone and ensure all medicine recommendations include specific potency.
 Answer:"""

-fallback_prompt = PromptTemplate(
-
+fallback_prompt = PromptTemplate(
+    template=fallback_template,
+    input_variables=["question", "chat_history"]
+)
+fallback_chain = LLMChain(
+    llm=llm,
+    prompt=fallback_prompt,
+    memory=st.session_state.conversation_memory
+)
+
+with st.sidebar:
+    st.title("Conversation History")
+
+    if st.button("New Session π"):
+        # Clear all conversation related session states
+        st.session_state.messages = []
+        st.session_state.messages.append({
+            "role": "assistant",
+            "content": "π Hello! I'm Dr. Radha, your AI-powered Organic Farming Consultant. How can I assist you today?"
+        })
+        st.session_state.conversation_memory.clear()
+        st.session_state.session_id = str(uuid.uuid4())
+        st.rerun()
+    # Display conversation history
+    st.subheader("Previous Conversations")
+    for message in st.session_state.messages[1:]:  # Skip the initial greeting
+        if message["role"] == "user":
+            st.text("You:")
+            st.text_area("", message["content"], height=50, disabled=True, key=f"hist_{uuid.uuid4()}")
+        else:
+            st.text("Dr. Radha:")
+            st.text_area("", message["content"], height=100, disabled=True, key=f"hist_{uuid.uuid4()}")

 # Replace your existing chat container and form section with this:
 chat_container = st.container()

@@ -275,6 +324,9 @@ with st.form(key='query_form', clear_on_submit=True):
     )
     submit_button = st.form_submit_button(label='Send Message π€')

+human_image = "human.png"
+robot_image = "bot.jpg"
+
 if submit_button and query:
     # Add user message to history
     st.session_state.messages.append({"role": "user", "content": query})

@@ -283,18 +335,29 @@ if submit_button and query:
     with st.chat_message("user", avatar="π€"):
         st.markdown(query)

-    # Show typing indicator while generating response
-    with st.chat_message("assistant", avatar=
+    # Show typing indicator while generating response "πΏ"
+    with st.chat_message("assistant", avatar=robot_image):
         with st.status("Analyzing your query...", expanded=True):
             st.write("π Retrieving relevant information...")
             st.write("π Generating personalized response...")

             # Generate response
-            result = qa({
+            result = qa({
+                "query": query,
+                "chat_history": st.session_state.conversation_memory.load_memory_variables({})["chat_history"]
+            })
+
             if result['result'].strip() == "":
-                response = fallback_chain.run(
+                response = fallback_chain.run(
+                    question=query,
+                    chat_history=st.session_state.conversation_memory.load_memory_variables({})["chat_history"])
             else:
                 response = result['result']
+
+            st.session_state.conversation_memory.save_context(
+                {"input": query},
+                {"output": response}
+            )

             # Display final response
             st.markdown(response)
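For reference, a minimal, self-contained sketch of the ConversationBufferMemory round-trip that this commit wires into the RetrievalQA chain and the fallback LLMChain. The sample turn below is hypothetical; the calls mirror the ones added in the diff (same memory_key, save_context, and load_memory_variables usage).

from langchain.memory import ConversationBufferMemory

# Same configuration as in the diff: history is exposed to prompts as {chat_history}.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

# After each turn the app saves the user query and the generated answer...
memory.save_context(
    {"input": "My tomato leaves are curling."},       # hypothetical user query
    {"output": "Analysis: ... Recommendations: ..."}  # hypothetical assistant answer
)

# ...and before the next turn it loads the accumulated history back out,
# which is what fills the {chat_history} variable in both prompt templates.
chat_history = memory.load_memory_variables({})["chat_history"]
print(chat_history)  # list of HumanMessage/AIMessage objects (return_messages=True)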