Update app.py
Browse files
app.py
CHANGED
@@ -154,13 +154,29 @@ if query:
# Quick debug view: echo the retrieved context and how many chunks it holds.
st.write(context, len(context))

# ----------------- Run Individual Chains Explicitly -----------------
# Chain 1 — relevancy judge: scores how relevant the retrieved context is
# to the user's query. `relevancy_prompt` is the template text defined
# elsewhere in this file; `llm_judge` is the evaluator LLM.
context_relevancy_checker_prompt = PromptTemplate(
    input_variables=["retriever_query", "context"],
    template=relevancy_prompt,
)
context_relevancy_evaluation_chain = LLMChain(
    llm=llm_judge,
    prompt=context_relevancy_checker_prompt,
    output_key="relevancy_response",
)
response_crisis = context_relevancy_evaluation_chain.invoke(
    {"context": context, "retriever_query": query}
)

# Chain 2 — context picker: consumes the relevancy scores above and selects
# which context item(s) to keep. `relevant_prompt` is presumably a
# PromptTemplate built elsewhere in this file — confirm against its definition.
pick_relevant_context_chain = LLMChain(
    llm=llm_judge,
    prompt=relevant_prompt,
    output_key="context_number",
)
relevant_response = pick_relevant_context_chain.invoke(
    {"relevancy_response": response_crisis["relevancy_response"]}
)

# ----------------- Display All Outputs -----------------
# Raw invoke() result (full dict, debug view).
st.subheader("response_crisis")
st.json(response_crisis)

# Just the judge's relevancy output.
st.subheader("response_crisis['relevancy_response']")
st.json(response_crisis["relevancy_response"])

# NOTE(review): this repeats the relevancy output shown above — kept as-is
# since the extra rendering appears to be intentional debug output.
st.markdown("### Context Relevancy Evaluation")
st.json(response_crisis["relevancy_response"])