tonko22 commited on
Commit
013068f
·
1 Parent(s): cb31ce4

refactor: Enhance logging with loguru and standardize code documentation

Browse files

- Refactor token counting logic in Gradio_UI.py for improved maintainability
- Improve error handling messages in API call retry mechanism
- Update .gitignore

Files changed (4) hide show
  1. .gitignore +5 -1
  2. Gradio_UI.py +29 -8
  3. app.py +183 -41
  4. pyproject.toml +1 -0
.gitignore CHANGED
@@ -1,3 +1,7 @@
1
  .env
2
  .venv
3
- uv.lock
 
 
 
 
 
1
  .env
2
  .venv
3
+ uv.lock
4
+ .python-version
5
+ .gradio
6
+ __pycache__/
7
+ *.pyc
Gradio_UI.py CHANGED
@@ -124,14 +124,35 @@ def stream_to_gradio(
124
  total_input_tokens = 0
125
  total_output_tokens = 0
126
 
127
- for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
128
- # Track tokens if model provides them
129
- if hasattr(agent.model, "last_input_token_count"):
 
 
 
 
 
 
 
 
 
 
 
130
  total_input_tokens += agent.model.last_input_token_count
131
- total_output_tokens += agent.model.last_output_token_count
132
- if isinstance(step_log, ActionStep):
133
- step_log.input_token_count = agent.model.last_input_token_count
134
- step_log.output_token_count = agent.model.last_output_token_count
 
 
 
 
 
 
 
 
 
 
135
 
136
  for message in pull_messages_from_step(
137
  step_log,
@@ -284,7 +305,7 @@ class GradioUI:
284
  [stored_messages, text_input],
285
  ).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
286
 
287
- demo.launch(debug=False, share=True, **kwargs)
288
 
289
 
290
  __all__ = ["stream_to_gradio", "GradioUI"]
 
124
  total_input_tokens = 0
125
  total_output_tokens = 0
126
 
127
def has_input_token_count(model):
    """Return True when *model* exposes a non-None ``last_input_token_count``."""
    # getattr with a default collapses the hasattr + None check into one lookup.
    return getattr(model, "last_input_token_count", None) is not None
130
+
131
def has_output_token_count(model):
    """Return True when *model* exposes a non-None ``last_output_token_count``."""
    # Single attribute lookup; missing attribute and None both count as "absent".
    return getattr(model, "last_output_token_count", None) is not None
134
+
135
def update_token_counters(step):
    """Updates token counters for the step if possible"""
    # The accumulators live in the enclosing stream_to_gradio scope.
    nonlocal total_input_tokens, total_output_tokens

    # Everything below is guarded on the INPUT counter being present:
    # if the model reports no input tokens, neither total is updated and
    # the ActionStep fields are left unset.
    # NOTE(review): confirm it is intended that output tokens are only
    # accumulated when input tokens are also available.
    if has_input_token_count(agent.model):
        # Update the total input token counter
        total_input_tokens += agent.model.last_input_token_count

        # Update the output token counter if available
        if has_output_token_count(agent.model):
            total_output_tokens += agent.model.last_output_token_count

        # If the step is an ActionStep, save the counters in it
        if isinstance(step, ActionStep):
            step.input_token_count = agent.model.last_input_token_count
            if has_output_token_count(agent.model):
                step.output_token_count = agent.model.last_output_token_count
152
+
153
+ for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
154
+ # Update token counters if the model provides them
155
+ update_token_counters(step_log)
156
 
157
  for message in pull_messages_from_step(
158
  step_log,
 
305
  [stored_messages, text_input],
306
  ).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
307
 
308
+ demo.launch(**kwargs)
309
 
310
 
311
  __all__ = ["stream_to_gradio", "GradioUI"]
app.py CHANGED
@@ -1,5 +1,8 @@
1
  import os
2
- import yaml
 
 
 
3
  from Gradio_UI import GradioUI
4
  from litellm import completion
5
  from smolagents import (
@@ -9,69 +12,208 @@ from smolagents import (
9
  LiteLLMModel,
10
  VisitWebpageTool,
11
  tool,
 
12
  )
13
 
 
 
 
 
 
 
 
 
14
  os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  @tool
17
  def analyze_lyrics_tool(song_title: str, artist: str, lyrics: str) -> str:
18
  """
19
  Performs a deep analysis of the musical track, given its metadata.
20
-
21
  Args:
22
- song_title: title of the song or music trach.
23
  artist: The name of the artist.
24
  lyrics: The lyrics of the song.
25
-
26
  Returns:
27
  A summary of the song's meaning in English.
28
  """
29
 
30
- prompt = f'''You are an expert in songs and their meanings.
31
  Summarize the meaning of {song_title} by {artist} and identify
32
  key themes based on the lyrics:
33
  {lyrics}.
34
 
35
- Includs deep idea and vibes analysis with explainations
36
- based on references to the exact lines
37
- '''
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
- response = completion(
40
- model="gemini/gemini-2.0-flash",
41
- messages=[
42
- {"role": "user", "content": prompt}
43
- ])
 
 
 
 
 
 
 
44
 
45
- try:
46
- lyrics = response.choices[0].message.content.strip()
47
- return lyrics
48
- except (AttributeError, KeyError, IndexError):
49
  try:
50
- lyrics = response['choices'][0]['message']['content'].strip()
51
- return lyrics
52
- except (AttributeError, KeyError, IndexError):
53
- pass
54
-
55
- final_answer = FinalAnswerTool()
56
- model = LiteLLMModel(model_id="gemini/gemini-2.0-flash")
57
- with open("prompts.yaml", 'r') as stream:
58
- prompt_templates = yaml.safe_load(stream)
59
-
60
- # Example usage within the agent
61
- agent = CodeAgent(
62
- tools=[
63
- FinalAnswerTool(),
64
- DuckDuckGoSearchTool(),
65
- VisitWebpageTool(),
66
- analyze_lyrics_tool
67
- ],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  model=model,
69
- additional_authorized_imports=['numpy', 'bs4'],
 
 
 
70
  max_steps=22,
71
- verbosity_level=0,
72
- name="Song Lyrics Analyzer",
73
- description="Analyze the meaning of song lyrics and identify key themes based on the lyrics using web search tools and deep lyrics analysis tool.",
74
- prompt_templates=prompt_templates
 
 
 
 
 
 
 
 
75
  )
76
 
77
- GradioUI(agent).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
+ import time
3
+ import random
4
+ import traceback
5
+ from loguru import logger
6
  from Gradio_UI import GradioUI
7
  from litellm import completion
8
  from smolagents import (
 
12
  LiteLLMModel,
13
  VisitWebpageTool,
14
  tool,
15
+ Tool,
16
  )
17
 
18
# Setting up logging with loguru - only terminal output
logger.remove()  # Remove default handlers
logger.add(
    lambda msg: print(msg, end=""),
    level="INFO",
    format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{message}</cyan>")

# API key configuration: fail fast with a clear message when the key is missing.
# The previous `os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")` was
# a no-op when the key was set and raised an opaque TypeError when it was not.
_gemini_api_key = os.getenv("GEMINI_API_KEY")
if _gemini_api_key is None:
    raise RuntimeError("GEMINI_API_KEY environment variable is not set")
os.environ["GEMINI_API_KEY"] = _gemini_api_key
27
 
28
+
29
class LyricsSearchTool(Tool):
    """
    Uses web search to find song lyrics based on song title and artist name.

    The search query should include the song title and artist name. The tool
    will return the lyrics of the song if found.

    Parameters
    ----------
    query : str
        The search query for finding song lyrics. Should include song title and artist name.

    Returns
    -------
    str
        The lyrics of the song if found, otherwise an empty string.
    """

    # Metadata consumed by the smolagents Tool machinery.
    name = "lyrics_search_tool"
    description = "Uses web search to find song lyrics based on song title and artist name"
    inputs = {
        "query": {
            "type": "string",
            "description": "The search query for finding song lyrics. Should include song title and artist name.",
        }
    }
    output_type = "string"

    # NOTE: the previous no-op `__init__` (which only called `super().__init__`)
    # was removed; the inherited constructor is used instead.

    def forward(self, query: str) -> str:
        """Validate the query and run the search. Currently a stub."""
        # Raise instead of assert: asserts are stripped under `python -O`,
        # so they must not be used for input validation.
        if not isinstance(query, str):
            raise TypeError("Your search query must be a string")
        # TODO: Implement lyrics search functionality
        return "Lyrics search not implemented yet"
63
+
64
+
65
@tool
def analyze_lyrics_tool(song_title: str, artist: str, lyrics: str) -> str:
    """
    Performs a deep analysis of the musical track, given its metadata.

    Args:
        song_title: Title of the song or music track.
        artist: The name of the artist.
        lyrics: The lyrics of the song.

    Returns:
        A summary of the song's meaning in English.
    """

    prompt = f"""You are an expert in songs and their meanings.
    Summarize the meaning of {song_title} by {artist} and identify
    key themes based on the lyrics:
    {lyrics}.

    Include deep idea and vibes analysis with explanations
    based on references to the exact lines.
    """

    # Model selection is driven by the USE_ANTHROPIC environment variable.
    wants_claude = os.getenv("USE_ANTHROPIC", "false").lower() == "true"
    if wants_claude:
        chosen_model = "claude-3-haiku-20240307"
        logger.info("Using Anthropic model: {} for lyrics analysis", chosen_model)
    else:
        chosen_model = "gemini/gemini-2.0-flash"
        logger.info("Using Gemini model: {} for lyrics analysis", chosen_model)

    # Delegate the actual API call to the retry-aware helper.
    logger.info("Analyzing lyrics for song: '{}' by '{}'", song_title, artist)
    return _make_api_call_with_retry(chosen_model, prompt)
99
+
100
+
101
# Function with manual implementation of retry mechanism
def _make_api_call_with_retry(model: str, prompt: str) -> str:
    """
    Makes an API call with a retry mechanism for error handling.

    Transient network failures (ConnectionError, TimeoutError) are retried
    with exponential backoff plus jitter; any other exception propagates
    immediately.

    Args:
        model: The model identifier to use.
        prompt: The prompt text to send to the model.

    Returns:
        The response from the model as a string.

    Raises:
        ConnectionError, TimeoutError: the last transient error, re-raised
            once all retry attempts are exhausted.
        ValueError: when the content cannot be extracted from the response.
    """
    max_attempts = 20
    base_delay = 10  # seconds; doubled on each retry (exponential backoff)
    max_delay = 60   # cap for the backoff delay
    last_exception = None

    # A `for` loop replaces the original manual `while`/counter; the original
    # could also fall off the end and implicitly return None, violating the
    # declared `-> str` contract — this version always returns or raises.
    for attempt in range(max_attempts):
        try:
            # Add a small random delay to prevent simultaneous requests
            time.sleep(random.uniform(0.1, 1.0))

            # If this is a retry attempt, add exponential backoff delay
            if attempt > 0:
                time.sleep(min(base_delay * (2 ** (attempt - 1)), max_delay))

            response = completion(
                model=model,
                messages=[{"role": "user", "content": prompt}],
                num_retries=2,  # Built-in retry mechanism of LiteLLM
            )

            # Try to extract the content: support both attribute-style and
            # dict-style response objects.
            try:
                return response.choices[0].message.content.strip()
            except (AttributeError, KeyError, IndexError):
                try:
                    return response["choices"][0]["message"]["content"].strip()
                except (AttributeError, KeyError, IndexError):
                    # If we couldn't extract the content, return an error
                    raise ValueError("Failed to extract content from response")

        except (ConnectionError, TimeoutError) as e:
            # Transient failure: remember it, log, and let the loop retry.
            last_exception = e
            logger.warning("API call failed (attempt {}/{}) for model {}: {}. Retrying...", attempt+1, max_attempts, model, str(e))
        except Exception as e:
            logger.error("Unexpected error: {}", str(e))
            logger.error(traceback.format_exc())
            raise  # For other exceptions, we don't retry

    # All attempts exhausted: re-raise the last transient exception.
    logger.error("All {} attempts failed. Last error: {}", max_attempts, str(last_exception))
    raise last_exception
162
+
163
+
164
+ # TODO: use DuckDuckGoSearchTool to find related information
165
+ # for explanation in case the LLM itself is not confident or doesn't know
166
+ #
167
+
168
+ # Check if we need to use Anthropic for local testing
169
+ use_anthropic = os.getenv("USE_ANTHROPIC", "false").lower() == "true"
170
+
171
+ # Configure Anthropic API key if needed
172
+ if use_anthropic:
173
+ os.environ["ANTHROPIC_API_KEY"] = os.getenv("ANTHROPIC_API_KEY")
174
+ model = LiteLLMModel(model_id="claude-3-haiku-20240307")
175
+ logger.info("Using Anthropic Claude model for local testing")
176
+ else:
177
+ model = LiteLLMModel(model_id="gemini/gemini-2.0-flash")
178
+ logger.info("Using Gemini model as default")
179
+
180
+ web_agent = CodeAgent(
181
  model=model,
182
+ tools=[DuckDuckGoSearchTool(), VisitWebpageTool()],
183
+ name="lyrics_search_agent",
184
+ description="Browses the web to find original full lyrics and scrape them. Excels at building effective search queries",
185
+ additional_authorized_imports=["numpy", "bs4"],
186
  max_steps=22,
187
+ verbosity_level=2,
188
+ )
189
+
190
+
191
+ analysis_agent = CodeAgent(
192
+ model=model,
193
+ tools=[DuckDuckGoSearchTool(), VisitWebpageTool(), analyze_lyrics_tool],
194
+ name="lyrics_analysis_agent",
195
+ description="You are a Song Analysis Expert with deep knowledge of music theory, lyrical interpretation, cultural contexts, and music history. Your role is to analyze song lyrics to uncover their deeper meaning, artistic significance, and historical context.",
196
+ additional_authorized_imports=["numpy", "bs4"],
197
+ max_steps=50,
198
+ verbosity_level=2,
199
  )
200
 
201
+
202
+ # When using the DuckDuckGoSearchTool, clearly indicate when information comes from external research versus your own knowledge base.
203
+ manager_agent = CodeAgent(
204
+ model=model,
205
+ tools=[FinalAnswerTool()],
206
+ name="manager_agent",
207
+ description="Manages the search process and coordinates the search and analysis of song lyrics.",
208
+ managed_agents=[web_agent, analysis_agent],
209
+ additional_authorized_imports=["json"],
210
+ planning_interval=5,
211
+ verbosity_level=2,
212
+ max_steps=15,
213
+ )
214
+
215
+ logger.info("Initializing Gradio UI and launching server")
216
+ GradioUI(manager_agent).launch(
217
+ debug=True, share=False, server_name="127.0.0.1", server_port=3000
218
+ )
219
+ logger.success("Server started successfully")
pyproject.toml CHANGED
@@ -8,5 +8,6 @@ dependencies = [
8
  "gradio>=5.20.0",
9
  "huggingface-hub>=0.29.1",
10
  "litellm>=1.61.20",
 
11
  "smolagents>=1.9.2",
12
  ]
 
8
  "gradio>=5.20.0",
9
  "huggingface-hub>=0.29.1",
10
  "litellm>=1.61.20",
11
+ "loguru>=0.7.3",
12
  "smolagents>=1.9.2",
13
  ]