DrishtiSharma commited on
Commit
40a5413
·
verified ·
1 Parent(s): 229a73d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -26
app.py CHANGED
@@ -18,6 +18,7 @@ st.title("Blah-1")
18
 
19
  # ----------------- API Keys -----------------
20
  os.environ["GROQ_API_KEY"] = st.secrets.get("GROQ_API_KEY", "")
 
21
 
22
  # ----------------- Clear ChromaDB Cache -----------------
23
  chromadb.api.client.SharedSystemClient.clear_system_cache()
@@ -117,42 +118,39 @@ if query:
117
  context = [d.page_content for d in retrieved_docs]
118
  st.success("✅ Context retrieved successfully!")
119
 
120
- # ----------------- Full SequentialChain Execution -----------------
121
- with st.spinner("🔄 Running full pipeline..."):
122
- final_output = SequentialChain(
123
- chains=[
124
- LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["retriever_query", "context"], template=relevancy_prompt), output_key="relevancy_response"),
125
- LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["relevancy_response"], template=relevant_context_picker_prompt), output_key="context_number"),
126
- LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["context_number", "context"], template=response_synth), output_key="relevant_contexts"),
127
- LLMChain(llm=rag_llm, prompt=PromptTemplate(input_variables=["query", "context"], template=rag_prompt), output_key="final_response")
128
- ],
129
- input_variables=["context", "retriever_query", "query"],
130
- output_variables=["relevancy_response", "context_number", "relevant_contexts", "final_response"]
131
- ).invoke({"context": context, "retriever_query": query, "query": query})
132
 
133
  # ----------------- Display All Outputs -----------------
134
  st.markdown("### 🟥 Context Relevancy Evaluation")
135
- st.json(final_output["relevancy_response"])
136
 
137
  st.markdown("### 🟦 Picked Relevant Contexts")
138
- st.json(final_output["context_number"])
139
 
140
  st.markdown("### 🟥 Extracted Relevant Contexts")
141
- st.json(final_output["relevant_contexts"])
142
 
143
  st.markdown("## 🟥 RAG Final Response")
144
- st.write(final_output["final_response"])
145
 
146
- # ----------------- Streamlit-Friendly Debugging (Replacing print statements) -----------------
147
- st.markdown("### Debug Logs:")
148
- st.text("\n-------- 🟥 Context Relevancy Evaluation Statement 🟥 --------\n")
149
- st.json(final_output["relevancy_response"])
150
 
151
- st.text("\n-------- 🟦 Picked Relevant Context Statement 🟦 --------\n")
152
- st.json(final_output["context_number"])
153
 
154
- st.text("\n-------- 🟥 Relevant Contexts Statement 🟥 --------\n")
155
- st.json(final_output["relevant_contexts"])
156
 
157
- st.text("\n-------- 🟥 RAG Response Statement 🟥 --------\n")
158
- st.write(final_output["final_response"])
 
18
 
19
  # ----------------- API Keys -----------------
20
  os.environ["GROQ_API_KEY"] = st.secrets.get("GROQ_API_KEY", "")
21
+ os.environ["HF_TOKEN"] = st.secrets.get("HF_TOKEN", "")
22
 
23
  # ----------------- Clear ChromaDB Cache -----------------
24
  chromadb.api.client.SharedSystemClient.clear_system_cache()
 
118
  context = [d.page_content for d in retrieved_docs]
119
  st.success("✅ Context retrieved successfully!")
120
 
121
+ # ----------------- Run Individual Chains Explicitly -----------------
122
+ context_relevancy_chain = LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["retriever_query", "context"], template=relevancy_prompt), output_key="relevancy_response")
123
+ relevant_context_chain = LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["relevancy_response"], template=relevant_context_picker_prompt), output_key="context_number")
124
+ relevant_contexts_chain = LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["context_number", "context"], template=response_synth), output_key="relevant_contexts")
125
+ response_chain = LLMChain(llm=rag_llm, prompt=PromptTemplate(input_variables=["query", "context"], template=rag_prompt), output_key="final_response")
126
+
127
+ response_crisis = context_relevancy_chain.invoke({"context": context, "retriever_query": query})
128
+ relevant_response = relevant_context_chain.invoke({"relevancy_response": response_crisis["relevancy_response"]})
129
+ contexts = relevant_contexts_chain.invoke({"context_number": relevant_response["context_number"], "context": context})
130
+ final_response = response_chain.invoke({"query": query, "context": contexts["relevant_contexts"]})
 
 
131
 
132
  # ----------------- Display All Outputs -----------------
133
  st.markdown("### 🟥 Context Relevancy Evaluation")
134
+ st.json(response_crisis["relevancy_response"])
135
 
136
  st.markdown("### 🟦 Picked Relevant Contexts")
137
+ st.json(relevant_response["context_number"])
138
 
139
  st.markdown("### 🟥 Extracted Relevant Contexts")
140
+ st.json(contexts["relevant_contexts"])
141
 
142
  st.markdown("## 🟥 RAG Final Response")
143
+ st.write(final_response["final_response"])
144
 
145
+ # ----------------- Debugging Output -----------------
146
+ st.text("\n-------- 🟥 context_relevancy_evaluation_chain Statement 🟥 --------\n")
147
+ st.json(response_crisis["relevancy_response"])
 
148
 
149
+ st.text("\n-------- 🟦 pick_relevant_context_chain Statement 🟦 --------\n")
150
+ st.json(relevant_response["context_number"])
151
 
152
+ st.text("\n-------- 🟥 relevant_contexts_chain Statement 🟥 --------\n")
153
+ st.json(contexts["relevant_contexts"])
154
 
155
+ st.text("\n-------- 🟥 Rag Response Statement 🟥 --------\n")
156
+ st.write(final_response["final_response"])