Commit 723eefb
Parent(s): f5f5cd4
- __pycache__/main.cpython-310.pyc +0 -0
- main.py +5 -10
__pycache__/main.cpython-310.pyc CHANGED
Binary files a/__pycache__/main.cpython-310.pyc and b/__pycache__/main.cpython-310.pyc differ

main.py CHANGED
@@ -180,9 +180,7 @@ def benchmark_model_multithreaded(model_name, questions, open_router_key, openai
             st.write(f"**Novelty Score:** {result['novelty_score']}")
             results.extend(result["results"])  # Add results here
             novelty_score += result["novelty_score"]  # Update novelty score
-            st.write(
-                f"<span style='color:yellow'>Total novelty score across all questions (so far): {novelty_score}</span>",
-                unsafe_allow_html=True)
+            st.warning(f"Total novelty score across all questions (so far): {novelty_score}")

         elif result["type"] == "summary":
             st.write(f"<span style='color:blue'>Total novelty score for question '{result['question']}': {result['total_novelty']}</span>",
@@ -194,8 +192,7 @@ def benchmark_model_multithreaded(model_name, questions, open_router_key, openai
                      unsafe_allow_html=True)


-    st.write(f"<span style='color:yellow'>Final total novelty score across all questions: {novelty_score}</span>",
-             unsafe_allow_html=True)
+    st.warning(f"Final total novelty score across all questions: {novelty_score}")
     return results


@@ -213,9 +210,8 @@ def benchmark_model_sequential(model_name, questions, open_router_key, openai_ap
             st.write(f"**Novelty Score:** {result['novelty_score']}")
             results.extend(result["results"])
             novelty_score += result["novelty_score"]  # Add to novelty score
-            st.write(
-                f"<span style='color:yellow'>Total novelty score across processed questions: {novelty_score}</span>",
-                unsafe_allow_html=True)
+            st.warning(
+                f"Total novelty score across processed questions: {novelty_score}")

         elif result["type"] == "summary":
             st.write(f"<span style='color:blue'>Total novelty score for question '{result['question']}': {result['total_novelty']}</span>",
@@ -227,7 +223,6 @@ def benchmark_model_sequential(model_name, questions, open_router_key, openai_ap
             st.write(f"<span style='color:red'>Error in thread: {result['message']}</span>",
                      unsafe_allow_html=True)

-    st.write(f"<span style='color:yellow'>Final total novelty score across all questions: {novelty_score}</span>",
-             unsafe_allow_html=True)
+    st.warning(f"Final total novelty score across all questions: {novelty_score}")

     return results
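Note on the change: in both benchmark_model_multithreaded and benchmark_model_sequential, this commit swaps raw-HTML status lines rendered through st.write(..., unsafe_allow_html=True) for Streamlit's built-in st.warning callout. A minimal standalone sketch of the before/after pattern (the novelty_score value below is a placeholder, not taken from the app):

    import streamlit as st

    novelty_score = 42  # placeholder running total; the app accumulates this per question

    # Before: colored text via raw HTML, which requires unsafe_allow_html=True
    st.write(f"<span style='color:yellow'>Total novelty score across all questions (so far): {novelty_score}</span>",
             unsafe_allow_html=True)

    # After: Streamlit's themed warning box, no raw HTML involved
    st.warning(f"Total novelty score across all questions (so far): {novelty_score}")

st.warning renders a consistent yellow callout across themes and avoids injecting unescaped HTML into the page, which also accounts for the +5 -10 line count on main.py: each multi-line st.write call collapses to a single (or two-line) st.warning call.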