Update crew.py
crew.py (changed)

@@ -15,18 +15,18 @@ from util import read_file, get_final_answer
 
 ## LLMs
 
-MANAGER_MODEL = "gpt-4.1"
-AGENT_MODEL = "gpt-4.1"
+MANAGER_MODEL = "gpt-4.1-mini"
+AGENT_MODEL = "gpt-4.1-mini"
 
 FINAL_ANSWER_MODEL = "gemini-2.5-flash-preview-04-17"
 
 WEB_SEARCH_MODEL = "gemini-2.5-flash-preview-04-17"
-IMAGE_ANALYSIS_MODEL = "gemini-2.5-
+IMAGE_ANALYSIS_MODEL = "gemini-2.5-flash-preview-04-17"
 AUDIO_ANALYSIS_MODEL = "gemini-2.5-flash-preview-04-17"
 VIDEO_ANALYSIS_MODEL = "gemini-2.5-flash-preview-04-17"
 YOUTUBE_ANALYSIS_MODEL = "gemini-2.5-flash-preview-04-17"
-CODE_GENERATION_MODEL = "gemini-2.5-
-CODE_EXECUTION_MODEL = "gemini-2.5-
+CODE_GENERATION_MODEL = "gemini-2.5-flash-preview-04-17"
+CODE_EXECUTION_MODEL = "gemini-2.5-flash-preview-04-17"
 
 # LLM evaluation
 

@@ -181,11 +181,11 @@ def run_crew(question, file_path):
 
 @tool("Code Generation Tool")
 def code_generation_tool(question: str) -> str:
-    """Given a question, generate code to answer the question.
+    """Given a question with data, generate code to answer the question.
 
     Args:
        question (str): Question to answer
-
+
     Returns:
        str: Answer to the question
 

@@ -371,8 +371,10 @@ def run_crew(question, file_path):
     initial_answer = crew.kickoff(inputs={"question": question})
     final_answer = get_final_answer(FINAL_ANSWER_MODEL, question, str(initial_answer))
 
+    print("###")
     print(f"Question: {question}")
     print(f"Initial answer: {initial_answer}")
     print(f"Final answer: {final_answer}")
+    print("###")
 
     return final_answer
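
For context (not part of the commit): the constants touched above are plain model-name strings, and this diff does not show where they are turned into LLM clients or attached to agents. The sketch below is an assumption about how the renamed AGENT_MODEL and the @tool-decorated code_generation_tool could be wired into a crewAI agent and crew; the role, goal, backstory, and task text are hypothetical, and any provider prefix (for example gemini/ for litellm routing) is presumably handled elsewhere in crew.py.

# Hypothetical wiring sketch, not taken from this diff.
from crewai import Agent, Crew, Task, LLM
from crewai.tools import tool

AGENT_MODEL = "gpt-4.1-mini"  # value set by this commit

@tool("Code Generation Tool")
def code_generation_tool(question: str) -> str:
    """Given a question with data, generate code to answer the question."""
    return ""  # placeholder; the real body lives in crew.py

# A single agent that carries the tool; role/goal/backstory are illustrative only.
coder = Agent(
    role="Code generator",
    goal="Write code that answers the question",
    backstory="Illustrative agent used to show how the model constants are consumed.",
    llm=LLM(model=AGENT_MODEL),
    tools=[code_generation_tool],
)

task = Task(
    description="Answer the question: {question}",
    expected_output="A short, direct answer",
    agent=coder,
)

crew = Crew(agents=[coder], tasks=[task])
# Same call pattern as run_crew in the diff:
# crew.kickoff(inputs={"question": "What is 2 + 2?"})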