Update app.py
app.py CHANGED
@@ -26,15 +26,19 @@ class EvalInput(BaseModel):
 async def evaluation(request:EvalInput):
 
     prompt_version = request.promptversion
-    prompt_version_splitted=prompt_version.split(":")
+    #prompt_version_splitted=prompt_version.split(":")
 
-    if prompt_version_splitted[0]=="paradigm_identifier":
+    #if prompt_version_splitted[0]=="paradigm_identifier":
 
-        le.Paradigm_LLM_Evaluator(prompt_version)
+        #le.Paradigm_LLM_Evaluator(prompt_version)
 
-    elif prompt_version_splitted[0]=="observational_biologist":
-
-
+    #elif prompt_version_splitted[0]=="observational_biologist":
+    try:
+        le.LLM_Evaluator(prompt_version)
+
+        #elif prompt_version_splitted[0]=="ontology_generator":
     # Example processing (replace with actual logic)
-
+        return JSONResponse(content={"evalsuccessfull":True},status_code=200)
+    except Exception as e:
+        return JSONResponse(content={"evalsuccessfull":False},status_code=200)
 
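For context, here is a minimal sketch of what the updated handler could look like after this commit. Only the body of evaluation() comes from the diff above; the route path, the imports, the module behind the le alias, and the promptversion field type/format are assumptions added for illustration.

# Minimal sketch of the handler after this change. Only the body of
# evaluation() is taken from the diff; everything else is an assumption.
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from pydantic import BaseModel

import llm_evaluator as le  # assumed name of the evaluator module behind `le`

app = FastAPI()

class EvalInput(BaseModel):
    promptversion: str  # e.g. "observational_biologist:v2" (assumed format)

@app.post("/evaluation")  # assumed route; the decorator is not shown in the diff
async def evaluation(request: EvalInput):
    prompt_version = request.promptversion
    try:
        # Run the evaluator for the requested prompt version.
        le.LLM_Evaluator(prompt_version)
        return JSONResponse(content={"evalsuccessfull": True}, status_code=200)
    except Exception:
        # Failures are reported in the payload but still answered with HTTP 200.
        return JSONResponse(content={"evalsuccessfull": False}, status_code=200)

Note that after this change both the success and the failure path return HTTP 200, so a caller has to inspect the evalsuccessfull flag in the response body to tell them apart.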