Junaidb committed
Commit 58da707 · verified · 1 Parent(s): c9b0834

Update llmeval.py

Files changed (1)
  1. llmeval.py +11 -20
llmeval.py CHANGED
@@ -214,7 +214,15 @@ class LLM_as_Evaluator():
             "unit_coherence": SYSTEM_PROMPT_FOR_TRIAD_COHERENCE
         }
 
-        evaluation_responses=[]
+        #evaluation_responses=[]
+        data={
+            "promptversion":promptversion,
+            "biological_context_alignment":"",
+            "contextual_relevance_alignment":"",
+            "unit_coherence":"",
+            "response_specificity":""
+        }
+
         for metric in metrics:
             system_prompt = prompt_map[metric]
 
@@ -228,25 +236,8 @@ class LLM_as_Evaluator():
             ]
 
             evaluation_response = self.___engine_core(messages=messages)
-            evaluation_responses.append({metric: evaluation_response})
-
-
-
-        data={
-            "promptversion":promptversion,
-            "biological_context_alignment":"",
-            "contextual_relevance_alignment":"",
-            "unit_coherence":"",
-            "response_specificity":""
-        }
-
-        for resp in evaluation_responses:
-
-            data["biological_context_alignment"]=resp["biological_context_alignment"]
-            data["contextual_relevance_alignment"]=resp["contextual_relevance_alignment"]
-            data["unit_coherence"]=resp["unit_coherence"]
-            data["response_specificity"]=resp["response_specificity"]
-
+            data[metric]=evaluation_response
+
 
         de.Update(data=data)
 
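The commit replaces the intermediate evaluation_responses list (and the later copy into data) with a single data dict that is filled in directly, keyed by metric name, inside the loop. Below is a minimal, runnable sketch of that flow; everything beyond what the diff shows (the constructor, the evaluate() signature, the fake engine and store) is a hypothetical stand-in, not the repository's actual API.

# A minimal sketch of the evaluation flow after this commit, assuming
# hypothetical collaborators; only the loop body mirrors the diff above.
class LLM_as_Evaluator:
    def __init__(self, engine):
        # `engine` stands in for whatever backs ___engine_core in llmeval.py.
        self._engine = engine

    def ___engine_core(self, messages):
        # Forward the chat messages to the underlying engine and return its reply.
        return self._engine(messages)

    def evaluate(self, prompt_map, metrics, promptversion, question, answer, de):
        # One result dict is filled directly, keyed by metric name, replacing the
        # old evaluation_responses list that was copied into `data` afterwards.
        data = {"promptversion": promptversion}
        data.update({metric: "" for metric in metrics})

        for metric in metrics:
            system_prompt = prompt_map[metric]
            messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": f"Question: {question}\nAnswer: {answer}"},
            ]
            evaluation_response = self.___engine_core(messages=messages)
            data[metric] = evaluation_response  # write straight into the result dict

        de.Update(data=data)
        return data

# Example wiring with fake collaborators, just to show the call shape:
class _FakeStore:
    def Update(self, data):
        print(data)

evaluator = LLM_as_Evaluator(engine=lambda messages: "4/5")
evaluator.evaluate(
    prompt_map={"unit_coherence": "You are a strict coherence evaluator."},
    metrics=["unit_coherence"],
    promptversion="v1",
    question="Which pathway is affected?",
    answer="The MAPK signalling cascade.",
    de=_FakeStore(),
)

Writing each score under its metric key as it is produced avoids the earlier bug where every appended {metric: response} dict was indexed with all four metric names, which could only succeed for the key that dict actually contained.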