bstraehle committed
Commit 948a6f4 · verified · 1 Parent(s): 954a4ca

Update crew.py

Files changed (1)
  1. crew.py +9 -7
crew.py CHANGED
@@ -16,6 +16,8 @@ from util import get_final_answer
 
 MANAGER_MODEL = "gpt-4.1"
 AGENT_MODEL = "gpt-4.1"
+STT_MODEL = "whisper-1"
+FINAL_ANSWER_MODEL = ""
 
 PHOENIX_API_KEY = os.environ["PHOENIX_API_KEY"]
 
@@ -29,7 +31,7 @@ tracer_provider = register(
 
 CrewAIInstrumentor().instrument(tracer_provider=tracer_provider)
 
-def run_crew(question, file_name):
+def run_crew(question, file_path):
     # Tools
 
     @tool("Audio Analysis Tool")
@@ -38,7 +40,7 @@ def run_crew(question, file_name):
 
         Args:
             question (str): Question to answer
-            file_name (str): Name of the audio file to transcribe
+            file_path (str): Path of the audio file to transcribe
 
         Returns:
             str: Transcribed text from the audio file
@@ -47,15 +49,15 @@ def run_crew(question, file_name):
             FileNotFoundError: If audio file does not exist
             RuntimeError: If transcription fails"""
         ###
-        if not os.path.exists(file_name):
-            raise FileNotFoundError(f"Audio file not found: {file_name}")
+        if not os.path.exists(file_path):
+            raise FileNotFoundError(f"Audio file not found: {file_path}")
 
         try:
             # Load Whisper model (using base model by default)
             client = OpenAI()
             client.audio.transcriptions.create(
-                file=open(audio_filepath, "rb"),
-                model="whisper-1",
+                file=open(file_path, "rb"),
+                model=STT_MODEL,
                 prompt=prompt,
             )
             return transcript.text
@@ -165,7 +167,7 @@ def run_crew(question, file_name):
             question = f"{question} File data:\n{file.read()}"
 
     initial_answer = crew.kickoff(inputs={"question": question})
-    final_answer = get_final_answer(question, str(initial_answer))
+    final_answer = get_final_answer(FINAL_ANSWER_MODEL, question, str(initial_answer))
 
     print(f"Question: {question}")
     print(f"Initial answer: {initial_answer}")