Update app.py
app.py CHANGED
@@ -12,6 +12,40 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.chains import LLMChain
 from langchain.memory import ConversationBufferMemory
 
+# Set persistent storage path
+PERSISTENT_DIR = "vector_db"
+
+def initialize_vector_db():
+    if os.path.exists(PERSISTENT_DIR) and os.listdir(PERSISTENT_DIR):
+        embeddings = HuggingFaceEmbeddings()
+        vector_db = Chroma(persist_directory=PERSISTENT_DIR, embedding_function=embeddings)
+        return None, vector_db
+
+    base_dir = os.path.dirname(os.path.abspath(__file__))
+    pdf_files = [f for f in os.listdir(base_dir) if f.endswith('.pdf')]
+    loaders = [PyPDFLoader(os.path.join(base_dir, fn)) for fn in pdf_files]
+
+    documents = []
+    for loader in loaders:
+        documents.extend(loader.load())
+
+    text_splitter = RecursiveCharacterTextSplitter(
+        chunk_size=1000,
+        chunk_overlap=200,
+        length_function=len,
+        separators=["\n\n", "\n", " ", ""]
+    )
+    texts = text_splitter.split_documents(documents)
+
+    embeddings = HuggingFaceEmbeddings()
+    vector_db = Chroma.from_documents(
+        texts,
+        embeddings,
+        persist_directory=PERSISTENT_DIR
+    )
+    vector_db.persist()
+    return documents, vector_db
+
 # Initialize session state for chat history and memory
 if "chat_history" not in st.session_state:
     st.session_state.chat_history = []
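Two notes on the hunk above. First, `HuggingFaceEmbeddings()` is instantiated in both branches here and again at module level further down in this commit; a single shared instance would avoid loading the embedding model twice. Second, whether `vector_db.persist()` exists depends on the installed versions: older chromadb releases require the explicit call, while from chromadb 0.4.x onward writes are persisted automatically and newer `Chroma` wrappers drop the method. A version-tolerant variant of the save step, offered only as a sketch:

# Sketch, not part of the commit: call persist() only when the wrapper
# still provides it (older langchain/chromadb combinations).
def persist_if_supported(vector_db) -> None:
    if hasattr(vector_db, "persist"):
        vector_db.persist()

Calling `persist_if_supported(vector_db)` in place of the bare `vector_db.persist()` keeps `initialize_vector_db()` working across upgrades.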
@@ -21,10 +55,46 @@ if "memory" not in st.session_state:
     return_messages=True
 )
 
-#
+# System instructions for the LLM
+system_prompt = """You are an expert organic farming consultant with specialization in Agro-Homeopathy. When providing suggestions and remedies:
+1. Always specify medicine potency as 6c unless the uploaded text explicitly mentions some other value
+2. Provide comprehensive diagnosis and treatment advice along with organic farming best practices applicable in the given context
+3. Base recommendations on homeopathic and organic farming principles
+"""
+
+api_key1 = os.getenv("api_key")
+
+# Page configuration
+start_time = time.time()
+st.set_page_config(page_title="Dr. Radha: The Agro-Homeopath", page_icon="🚀", layout="wide")
+
+# CSS styling
+st.markdown("""
+<style>
+.stApp {
+    background-color: #1B4D3E !important;
+    color: white !important;
+}
+.stTextInput>div>div>input {
+    color: black !important;
+    background-color: rgba(255,255,255,0.1) !important;
+}
+.stButton>button {
+    color: black !important;
+    background-color: yellow !important;
+}
+#the-title {
+    text-align: center;
+    font-size: 24px;
+    color: white;
+}
+</style>
+""", unsafe_allow_html=True)
+
+# Sidebar
 st.sidebar.title("Chat History")
 
-#
+# Clear Session button
 if st.sidebar.button("Clear Session"):
     st.session_state.chat_history = []
     st.session_state.memory.clear()
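Note: `system_prompt` is defined in this hunk but referenced in none of the hunks shown; both chains below are built from `prompt_template` and `fallback_template` alone, so the persona text never reaches the model. One way to wire it in, sketched under the assumption that the templates keep the `{context}`/`{chat_history}`/`{question}` placeholders shown further down:

# Sketch, not part of the commit: prepend the system instructions to the
# retrieval prompt so the consultant persona actually reaches the LLM.
from langchain.prompts import PromptTemplate

qa_prompt = PromptTemplate(
    template=system_prompt + "\n\n" + prompt_template,
    input_variables=["context", "chat_history", "question"],
)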
@@ -36,7 +106,72 @@ for i, chat in enumerate(st.session_state.chat_history):
     if st.sidebar.button(f"Go to Q{i+1}", key=f"btn_{i}"):
         st.session_state.query = chat['question']
 
-#
+# Main content
+st.title("🌿 Dr. Radha: AI-Powered Organic Farming Consultant")
+st.subheader("Specializing in Agro-Homeopathy | Free Consultation")
+
+st.markdown("""
+Please provide complete details about the issue, including:
+- Detailed description of the plant problem
+- Current location, temperature & weather conditions
+""")
+
+human_image = "human.png"
+robot_image = "bot.jpg"
+
+# Set up Groq API
+llm = ChatGroq(
+    api_key=api_key1,
+    max_tokens=None,
+    timeout=None,
+    max_retries=2,
+    temperature=0.7,
+    model="llama-3.1-70b-versatile"
+)
+
+embeddings = HuggingFaceEmbeddings()
+
+# Initialize session state for documents and vector_db
+if "documents" not in st.session_state:
+    st.session_state["documents"] = None
+if "vector_db" not in st.session_state:
+    st.session_state["vector_db"] = None
+if "query" not in st.session_state:
+    st.session_state["query"] = ""
+
+# Initialize vector database
+if st.session_state["documents"] is None or st.session_state["vector_db"] is None:
+    with st.spinner("Loading data..."):
+        documents, vector_db = initialize_vector_db()
+        st.session_state["documents"] = documents
+        st.session_state["vector_db"] = vector_db
+else:
+    documents = st.session_state["documents"]
+    vector_db = st.session_state["vector_db"]
+
+retriever = vector_db.as_retriever()
+
+# Prompt templates
+prompt_template = """As an expert organic farming consultant with specialization in Agro-Homeopathy, analyze the following context and question to provide a clear, structured response.
+
+Context: {context}
+Chat History: {chat_history}
+Question: {question}
+
+[Rest of your prompt template...]
+
+Answer:"""
+
+fallback_template = """As an expert organic farming consultant with specialization in Agro-Homeopathy, analyze the following question to provide a clear, structured response.
+
+Chat History: {chat_history}
+Question: {question}
+
+[Rest of your fallback template...]
+
+Answer:"""
+
+# Create QA chain with memory
 qa = RetrievalQA.from_chain_type(
     llm=llm,
     chain_type="stuff",
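A behavioral note on the initialization block above: after a warm start, `initialize_vector_db()` returns `(None, vector_db)`, so `st.session_state["documents"]` stays `None` and the `documents is None or vector_db is None` test sends every rerun back through the loading branch. The reload is cheap once the index is on disk, but the "Loading data..." spinner reappears on each interaction. Gating on the vector store alone avoids this; a drop-in sketch for that block:

# Sketch, not part of the commit: `documents` is legitimately None after
# a warm start that reuses the persisted index, so only the vector store
# should decide whether initialization runs again.
if st.session_state["vector_db"] is None:
    with st.spinner("Loading data..."):
        documents, vector_db = initialize_vector_db()
        st.session_state["documents"] = documents
        st.session_state["vector_db"] = vector_db
else:
    documents = st.session_state["documents"]
    vector_db = st.session_state["vector_db"]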
@@ -50,14 +185,26 @@ qa = RetrievalQA.from_chain_type(
     }
 )
 
-#
+# Create fallback chain with memory
+fallback_prompt = PromptTemplate(
+    template=fallback_template,
+    input_variables=["question", "chat_history"]
+)
 fallback_chain = LLMChain(
     llm=llm,
     prompt=fallback_prompt,
     memory=st.session_state.memory
 )
 
-#
+# Chat container
+chat_container = st.container()
+
+# Query form
+with st.form(key='query_form'):
+    query = st.text_input("Ask your question:", value=st.session_state.get("query", ""))
+    submit_button = st.form_submit_button(label='Submit')
+
+# Handle form submission
 if submit_button and query:
     with st.spinner("Generating response..."):
         # Add query to chat history
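The `qa` chain's middle arguments (new lines 178-184) are elided from this diff; only the opening (`llm`, `chain_type="stuff"`) and the closing `}` / `)` context are visible, which implies a `chain_type_kwargs` dict. The reconstruction below is an assumption based on that context and on the placeholders in `prompt_template`, not the commit's actual lines:

# Assumed shape of the elided arguments (new lines 178-184); the real
# values are not shown in this diff.
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    chain_type_kwargs={
        "prompt": PromptTemplate(
            template=prompt_template,
            input_variables=["context", "chat_history", "question"],
        ),
        "memory": st.session_state.memory,
    }
)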
@@ -99,9 +246,8 @@ if submit_button and query:
         if chat['question'] == query:
             st.markdown(f"{response}")
         else:
-            # Retrieve previous response from memory
             prev_response = st.session_state.memory.load_memory_variables({})
-            st.markdown(f"{prev_response}")
+            st.markdown(f"{prev_response.get('chat_history', '')}")
 
         st.markdown("---")
     st.session_state["query"] = ""
|