bstraehle committed
Commit 315329c · verified · 1 Parent(s): cc5b408

Update crew.py

Files changed (1)
  1. crew.py +52 -12
crew.py CHANGED
@@ -15,16 +15,17 @@ from util import get_final_answer
 
 ## LLMs
 
-MANAGER_MODEL = "gpt-4.1"
-AGENT_MODEL = "gpt-4.1"
+MANAGER_MODEL = "gpt-4.1-mini"
+AGENT_MODEL = "gpt-4.1-mini"
 
-FINAL_ANSWER_MODEL = "gemini-2.0-flash"
+FINAL_ANSWER_MODEL = "gemini-2.5-flash-preview-04-17"
 
-IMAGE_MODEL = "gemini-2.0-flash-thinking-exp-01-21"
-AUDIO_MODEL = "gemini-2.0-flash-thinking-exp-01-21"
-VIDEO_MODEL = "gemini-2.0-flash-thinking-exp-01-21"
-YOUTUBE_MODEL = "gemini-2.0-flash-thinking-exp-01-21"
-DOCUMENT_MODEL = "gemini-2.0-flash-thinking-exp-01-21"
+IMAGE_MODEL = "gemini-2.5-flash-preview-04-17"
+AUDIO_MODEL = "gemini-2.5-flash-preview-04-17"
+VIDEO_MODEL = "gemini-2.5-flash-preview-04-17"
+YOUTUBE_MODEL = "gemini-2.5-flash-preview-04-17"
+DOCUMENT_MODEL = "gemini-2.5-flash-preview-04-17"
+CODE_MODEL = "gemini-2.5-flash-preview-04-17"
 
 # LLM evaluation
 
@@ -45,7 +46,7 @@ def run_crew(question, file_path):
 
     web_search_tool = SerperDevTool()
     web_rag_tool = WebsiteSearchTool()
-    code_execution_tool = CodeInterpreterTool()
+    #code_execution_tool = CodeInterpreterTool()
 
     @tool("Image Analysis Tool")
     def image_analysis_tool(question: str, file_path: str) -> str:
@@ -181,7 +182,46 @@ def run_crew(question, file_path):
             return response.text
         except Exception as e:
             raise RuntimeError(f"Processing failed: {str(e)}")
 
+    @tool("Code Execution Tool")
+    def code_execution_tool(question: str, file_path: str) -> str:
+        """Answer a question about a Python code file.
+
+        Args:
+            question (str): Question about a Python code file
+            file_path (str): The Python code file path
+
+        Returns:
+            str: Answer to the question about the Python code file
+
+        Raises:
+            RuntimeError: If processing fails"""
+        try:
+            client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])
+
+            ###
+            #file = client.files.upload(file=file_path)
+
+            response = client.models.generate_content(
+                model=CODE_MODEL,
+                contents=question,
+                config=types.GenerateContentConfig(
+                    tools=[types.Tool(code_execution=types.ToolCodeExecution)]
+                ),
+            )
+
+            for part in response.candidates[0].content.parts:
+                if part.text is not None:
+                    print(part.text)
+                if part.executable_code is not None:
+                    print(part.executable_code.code)
+                if part.code_execution_result is not None:
+                    print(part.code_execution_result.output)
+                    return part.code_execution_result.output
+            ###
+        except Exception as e:
+            raise RuntimeError(f"Processing failed: {str(e)}")
+
     # Agents
 
     web_search_agent = Agent(
@@ -301,9 +341,9 @@ def run_crew(question, file_path):
     if file_path:
         question = f"{question} File path: {file_path}."
 
-        if file_path.endswith(".py"):
-            with open(f"{file_path}", "r") as file:
-                question = f"{question} File data:\n{file.read()}"
+        #if file_path.endswith(".py"):
+        #    with open(f"{file_path}", "r") as file:
+        #        question = f"{question} File data:\n{file.read()}"
 
     initial_answer = crew.kickoff(inputs={"question": question})
     final_answer = get_final_answer(FINAL_ANSWER_MODEL, question, str(initial_answer))
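The commit replaces CrewAI's built-in CodeInterpreterTool with the custom code_execution_tool defined above, but the agent definitions that would consume it sit outside the changed hunks. As a rough illustration only, a CrewAI agent could pick up the new tool as sketched below; the code_agent name and the role/goal/backstory strings are hypothetical and not part of the commit.

    # Hypothetical wiring; the actual agent definitions in crew.py are not shown in this diff.
    code_agent = Agent(
        role="Python code analyst",                  # assumed role string
        goal="Answer questions about Python code files",
        backstory="Inspects and runs code to answer questions.",
        llm=AGENT_MODEL,                              # "gpt-4.1-mini" as of this commit
        tools=[code_execution_tool],                  # the @tool-decorated function added above
        verbose=True,
    )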
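Note that both the files.upload call inside code_execution_tool and the older block that inlined .py file contents into the question are left commented out, so the Gemini request currently carries only the question text plus the file path string. Below is a self-contained sketch of the same google-genai code-execution call with the file read locally and appended to the prompt; the ask_about_code helper is made up for illustration and is not part of the commit.

    import os
    from google import genai
    from google.genai import types

    def ask_about_code(question: str, file_path: str) -> str:
        # Same client and code-execution tool configuration as the committed code_execution_tool.
        client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])
        with open(file_path, "r") as f:
            source = f.read()  # inline the file instead of the commented-out files.upload call
        response = client.models.generate_content(
            model="gemini-2.5-flash-preview-04-17",
            contents=f"{question}\n\nPython file contents:\n{source}",
            config=types.GenerateContentConfig(
                tools=[types.Tool(code_execution=types.ToolCodeExecution)]
            ),
        )
        # Prefer the execution output if the model ran code, otherwise fall back to the text answer.
        for part in response.candidates[0].content.parts:
            if part.code_execution_result is not None:
                return part.code_execution_result.output
        return response.text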