tonko22 committed
Commit ce0ec3b · 1 Parent(s): d2f2b9d

refactor: decompose monolithic app into modular architecture


- Split monolithic app.py into organized modules (tools/, agents/, etc.)
- Separate configuration from implementation in config.py
- Implement proper error handling for API calls
- Create specialized agent modules with clean factory pattern
- Maintain app.py as entry point for HuggingFace compatibility
- Improve code reusability and maintainability
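
For orientation, the module split described in the message above results in roughly the following layout (reconstructed from the file list in this commit; Gradio_UI.py and prompts.yaml are referenced by the code but not modified here):

app.py                 # thin entry point, kept for HuggingFace Spaces compatibility
config.py              # logging setup, API keys, model selection, agent/Gradio settings
api_utils.py           # LiteLLM completion calls with retry and backoff
agents/
    __init__.py
    web_agent.py       # factory for the lyrics_search_agent
    analysis_agent.py  # factory for the lyrics_analysis_agent
    manager_agent.py   # factory for the manager_agent that coordinates the two above
tools/
    __init__.py
    search_tools.py    # ThrottledDuckDuckGoSearchTool, LyricsSearchTool
    analysis_tools.py  # analyze_lyrics_tool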

agents/__init__.py ADDED
@@ -0,0 +1,3 @@
+ """
+ Agent definitions for lyrics search and analysis components.
+ """
agents/analysis_agent.py ADDED
@@ -0,0 +1,46 @@
+ """
+ Analysis agent for interpreting song lyrics and providing deeper context.
+ """
+
+ from smolagents import CodeAgent, VisitWebpageTool
+ from loguru import logger
+
+ from config import AGENT_CONFIG, load_prompt_templates
+ from tools.search_tools import ThrottledDuckDuckGoSearchTool
+ from tools.analysis_tools import analyze_lyrics_tool
+
+
+ def create_analysis_agent(model):
+     """
+     Create an agent specialized in analyzing and interpreting song lyrics.
+
+     Args:
+         model: The LLM model to use with this agent
+
+     Returns:
+         A configured CodeAgent for lyrics analysis
+     """
+     # Get configuration values
+     config = AGENT_CONFIG['analysis_agent']
+     prompt_templates = load_prompt_templates()
+
+     # Create the throttled search tool
+     throttled_search_tool = ThrottledDuckDuckGoSearchTool(
+         min_delay=3.0,
+         max_delay=7.0
+     )
+
+     # Create and return the agent
+     agent = CodeAgent(
+         model=model,
+         tools=[throttled_search_tool, VisitWebpageTool(), analyze_lyrics_tool],
+         name="lyrics_analysis_agent",
+         description=config['description'],
+         additional_authorized_imports=["numpy", "bs4"],
+         max_steps=config['max_steps'],
+         verbosity_level=config['verbosity_level'],
+         prompt_templates=prompt_templates
+     )
+
+     logger.info("Analysis agent created successfully")
+     return agent
agents/manager_agent.py ADDED
@@ -0,0 +1,46 @@
+ """
+ Manager agent for coordinating the lyrics search and analysis process.
+ """
+
+ from smolagents import CodeAgent, FinalAnswerTool
+ from loguru import logger
+
+ from config import AGENT_CONFIG, load_prompt_templates
+ from agents.web_agent import create_web_agent
+ from agents.analysis_agent import create_analysis_agent
+
+
+ def create_manager_agent(model):
+     """
+     Create a manager agent that coordinates the web and analysis agents.
+
+     Args:
+         model: The LLM model to use with this agent
+
+     Returns:
+         A configured CodeAgent manager for coordinating other agents
+     """
+     # Get configuration values
+     config = AGENT_CONFIG['manager_agent']
+     prompt_templates = load_prompt_templates()
+
+     # Create sub-agents
+     web_agent = create_web_agent(model)
+     analysis_agent = create_analysis_agent(model)
+
+     # Create and return the manager agent
+     agent = CodeAgent(
+         model=model,
+         tools=[FinalAnswerTool()],
+         name="manager_agent",
+         description=config['description'],
+         managed_agents=[web_agent, analysis_agent],
+         additional_authorized_imports=["json"],
+         planning_interval=config['planning_interval'],
+         verbosity_level=config['verbosity_level'],
+         max_steps=config['max_steps'],
+         prompt_templates=prompt_templates
+     )
+
+     logger.info("Manager agent created successfully")
+     return agent
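
Taken together, the factories build a small agent tree: the manager owns the web and analysis agents as managed_agents. Below is a minimal sketch of wiring it up outside the Gradio UI, assuming the config helpers defined later in this diff and smolagents' standard CodeAgent.run interface; the query string is a placeholder.

from smolagents import LiteLLMModel

from config import setup_logger, load_api_keys, get_model_id
from agents.manager_agent import create_manager_agent

setup_logger()
load_api_keys()

# One call is enough: the manager factory constructs the web and analysis
# sub-agents itself and registers them as managed agents.
model = LiteLLMModel(model_id=get_model_id())
manager = create_manager_agent(model)

# Placeholder task; the manager delegates searching to lyrics_search_agent
# and interpretation to lyrics_analysis_agent.
result = manager.run("Find and analyze the lyrics of 'Imagine' by John Lennon")
print(result)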
agents/web_agent.py ADDED
@@ -0,0 +1,45 @@
+ """
+ Web agent for finding and extracting song lyrics from online sources.
+ """
+
+ from smolagents import CodeAgent, VisitWebpageTool
+ from loguru import logger
+
+ from config import AGENT_CONFIG, load_prompt_templates
+ from tools.search_tools import ThrottledDuckDuckGoSearchTool
+
+
+ def create_web_agent(model):
+     """
+     Create an agent specialized in web browsing and lyrics extraction.
+
+     Args:
+         model: The LLM model to use with this agent
+
+     Returns:
+         A configured CodeAgent for web searches
+     """
+     # Get configuration values
+     config = AGENT_CONFIG['web_agent']
+     prompt_templates = load_prompt_templates()
+
+     # Create the throttled search tool
+     throttled_search_tool = ThrottledDuckDuckGoSearchTool(
+         min_delay=3.0,
+         max_delay=7.0
+     )
+
+     # Create and return the agent
+     agent = CodeAgent(
+         model=model,
+         tools=[throttled_search_tool, VisitWebpageTool()],
+         name="lyrics_search_agent",
+         description=config['description'],
+         additional_authorized_imports=["numpy", "bs4"],
+         max_steps=config['max_steps'],
+         verbosity_level=config['verbosity_level'],
+         prompt_templates=prompt_templates
+     )
+
+     logger.info("Web agent (lyrics search) created successfully")
+     return agent
api_utils.py ADDED
@@ -0,0 +1,70 @@
+ """
+ Utilities for making API calls with retry logic and error handling.
+ """
+
+ import time
+ import random
+ import traceback
+ from loguru import logger
+ from litellm import completion
+
+ def make_api_call_with_retry(model: str, prompt: str) -> str:
+     """
+     Makes an API call with a retry mechanism for error handling.
+
+     Args:
+         model: The model identifier to use.
+         prompt: The prompt text to send to the model.
+
+     Returns:
+         The response from the model as a string.
+     """
+     max_attempts = 20
+     base_delay = 10
+     max_delay = 60
+     attempt = 0
+     last_exception = None
+
+     while attempt < max_attempts:
+         try:
+             # Add a small random delay to prevent simultaneous requests
+             jitter = random.uniform(0.1, 1.0)
+             time.sleep(jitter)
+
+             # If this is a retry attempt, add exponential backoff delay
+             if attempt > 0:
+                 delay = min(base_delay * (2 ** (attempt - 1)), max_delay)
+                 time.sleep(delay)
+
+             response = completion(
+                 model=model,
+                 messages=[{"role": "user", "content": prompt}],
+                 num_retries=2,  # Built-in retry mechanism of LiteLLM
+             )
+
+             # Try to extract the content from the response
+             try:
+                 analysis_result = response.choices[0].message.content.strip()
+                 return analysis_result
+             except (AttributeError, KeyError, IndexError):
+                 try:
+                     analysis_result = response["choices"][0]["message"]["content"].strip()
+                     return analysis_result
+                 except (AttributeError, KeyError, IndexError):
+                     # If we couldn't extract the content, return an error
+                     raise ValueError("Failed to extract content from response")
+
+         except (ConnectionError, TimeoutError) as e:
+             last_exception = e
+             logger.warning("API call failed (attempt {}/{}) for model {}: {}. Retrying...", attempt+1, max_attempts, model, str(e))
+             attempt += 1
+             continue
+         except Exception as e:
+             logger.error("Unexpected error: {}", str(e))
+             logger.error(traceback.format_exc())
+             raise  # For other exceptions, we don't retry
+
+     # If all attempts failed, re-raise the last exception
+     if last_exception:
+         logger.error("All {} attempts failed. Last error: {}", max_attempts, str(last_exception))
+         raise last_exception
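
A short usage sketch of the retry helper follows; the model identifier and prompt are placeholders, and the matching API key (for example GEMINI_API_KEY) is assumed to be set in the environment.

from api_utils import make_api_call_with_retry

try:
    summary = make_api_call_with_retry(
        model="gemini/gemini-2.0-flash",
        prompt="Summarize the themes of 'Imagine' by John Lennon in two sentences.",
    )
    print(summary)
except (ConnectionError, TimeoutError) as exc:
    # Re-raised only after all 20 attempts are exhausted.
    print(f"API unavailable: {exc}")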
app.py CHANGED
@@ -1,225 +1,49 @@
- import os
- import time
- import random
- import yaml
- import traceback
- from loguru import logger
- from Gradio_UI import GradioUI
- from litellm import completion
- from smolagents import (
-     CodeAgent,
-     DuckDuckGoSearchTool,
-     FinalAnswerTool,
-     LiteLLMModel,
-     VisitWebpageTool,
-     tool,
-     Tool,
- )
-
- # Setting up logging with loguru - only terminal output
- logger.remove()  # Remove default handlers
- logger.add(
-     lambda msg: print(msg, end=""),
-     level="INFO",
-     format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{message}</cyan>")
-
- # API key configuration
- os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")
-
-
- class LyricsSearchTool(Tool):
-     """
-     Uses web search to find song lyrics based on song title and artist name
-
-     The search query should include the song title and artist name. The tool
-     will return the lyrics of the song if found.
-
-     Parameters
-     ----------
-     query : str
-         The search query for finding song lyrics. Should include song title and artist name.
-
-     Returns
-     -------
-     str
-         The lyrics of the song if found, otherwise an empty string.
-     """
-     name = "lyrics_search_tool"
-     description = "Uses web search to find song lyrics based on song title and artist name"
-     inputs = {
-         "query": {
-             "type": "string",
-             "description": "The search query for finding song lyrics. Should include song title and artist name.",
-         }
-     }
-     output_type = "string"
-
-     def __init__(self, **kwargs):
-         super().__init__(**kwargs)
-
-     def forward(self, query: str) -> str:
-         assert isinstance(query, str), "Your search query must be a string"
-         # TODO: Implement lyrics search functionality
-         return "Lyrics search not implemented yet"
-
- with open("prompts.yaml", 'r') as stream:
-     prompt_templates = yaml.safe_load(stream)
-
- @tool
- def analyze_lyrics_tool(song_title: str, artist: str, lyrics: str) -> str:
-     """
-     Performs a deep analysis of the musical track, given its metadata.
-
-     Args:
-         song_title: Title of the song or music track.
-         artist: The name of the artist.
-         lyrics: The lyrics of the song.
-
-     Returns:
-         A summary of the song's meaning in English.
-     """
-
-     prompt = f"""You are an expert in songs and their meanings.
-     Summarize the meaning of {song_title} by {artist} and identify
-     key themes based on the lyrics:
-     {lyrics}.
-
-     Include deep idea and vibes analysis with explanations
-     based on references to the exact lines.
-     """
-
-     # If the USE_ANTHROPIC environment variable is defined, use the Claude model
-     if os.getenv("USE_ANTHROPIC", "false").lower() == "true":
-         model_to_use = "claude-3-haiku-20240307"
-         logger.info("Using Anthropic model: {} for lyrics analysis", model_to_use)
-     else:
-         model_to_use = "gemini/gemini-2.0-flash"
-         logger.info("Using Gemini model: {} for lyrics analysis", model_to_use)
-
-     # Use the function with retry mechanism
-     logger.info("Analyzing lyrics for song: '{}' by '{}'", song_title, artist)
-     return _make_api_call_with_retry(model_to_use, prompt)
-
-
- # Function with manual implementation of retry mechanism
- def _make_api_call_with_retry(model: str, prompt: str) -> str:
-     """
-     Makes an API call with a retry mechanism for error handling.
-
-     Args:
-         model: The model identifier to use.
-         prompt: The prompt text to send to the model.
-
-     Returns:
-         The response from the model as a string.
-     """
-     max_attempts = 20
-     base_delay = 10
-     max_delay = 60
-     attempt = 0
-     last_exception = None
-
-     while attempt < max_attempts:
-         try:
-             # Add a small random delay to prevent simultaneous requests
-             jitter = random.uniform(0.1, 1.0)
-             time.sleep(jitter)
-
-             # If this is a retry attempt, add exponential backoff delay
-             if attempt > 0:
-                 delay = min(base_delay * (2 ** (attempt - 1)), max_delay)
-                 time.sleep(delay)
-
-             response = completion(
-                 model=model,
-                 messages=[{"role": "user", "content": prompt}],
-                 num_retries=2,  # Built-in retry mechanism of LiteLLM
-             )
-
-             # Try to extract the content from the response
-             try:
-                 analysis_result = response.choices[0].message.content.strip()
-                 return analysis_result
-             except (AttributeError, KeyError, IndexError):
-                 try:
-                     analysis_result = response["choices"][0]["message"]["content"].strip()
-                     return analysis_result
-                 except (AttributeError, KeyError, IndexError):
-                     # If we couldn't extract the content, return an error
-                     raise ValueError("Failed to extract content from response")
-
-         except (ConnectionError, TimeoutError) as e:
-             last_exception = e
-             logger.warning("API call failed (attempt {}/{}) for model {}: {}. Retrying...", attempt+1, max_attempts, model, str(e))
-             attempt += 1
-             continue
-         except Exception as e:
-             logger.error("Unexpected error: {}", str(e))
-             logger.error(traceback.format_exc())
-             raise  # For other exceptions, we don't retry
-
-     # If all attempts failed, re-raise the last exception
-     if last_exception:
-         logger.error("All {} attempts failed. Last error: {}", max_attempts, str(last_exception))
-         raise last_exception
-
-
- # TODO: use DuckDuckGoSearchTool to find related information
- # for explanation in case the LLM itself is not confident or doesn't know
-
- # Check if we need to use Anthropic for local testing
- use_anthropic = os.getenv("USE_ANTHROPIC", "false").lower() == "true"
-
- # Configure Anthropic API key if needed
- if use_anthropic:
-     os.environ["ANTHROPIC_API_KEY"] = os.getenv("ANTHROPIC_API_KEY")
-     model = LiteLLMModel(model_id="claude-3-haiku-20240307")
-     logger.info("Using Anthropic Claude model for local testing")
- else:
-     model = LiteLLMModel(model_id="gemini/gemini-2.0-flash")
-     logger.info("Using Gemini model as default")
-
- web_agent = CodeAgent(
-     model=model,
-     tools=[DuckDuckGoSearchTool(), VisitWebpageTool()],
-     name="lyrics_search_agent",
-     description="Browses the web to find original full lyrics and scrape them. Excels at building effective search queries",
-     additional_authorized_imports=["numpy", "bs4"],
-     max_steps=50,
-     verbosity_level=2,
-     prompt_templates=prompt_templates
- )
-
-
- analysis_agent = CodeAgent(
-     model=model,
-     tools=[DuckDuckGoSearchTool(), VisitWebpageTool(), analyze_lyrics_tool],
-     name="lyrics_analysis_agent",
-     description="You are a Song Analysis Expert with deep knowledge of music theory, lyrical interpretation, cultural contexts, and music history. Your role is to analyze song lyrics to uncover their deeper meaning, artistic significance, and historical context.",
-     additional_authorized_imports=["numpy", "bs4"],
-     max_steps=5,
-     verbosity_level=2,
-     prompt_templates=prompt_templates
- )
-
-
- # When using the DuckDuckGoSearchTool, clearly indicate when information comes from external research versus your own knowledge base.
- manager_agent = CodeAgent(
-     model=model,
-     tools=[FinalAnswerTool()],
-     name="manager_agent",
-     description="Manages the search process and coordinates the search and analysis of song lyrics.",
-     managed_agents=[web_agent, analysis_agent],
-     additional_authorized_imports=["json"],
-     planning_interval=5,
-     verbosity_level=2,
-     max_steps=15,
-     prompt_templates=prompt_templates
- )
-
- logger.info("Initializing Gradio UI and launching server")
- GradioUI(manager_agent).launch(
-     debug=True, share=True,
-     server_name="127.0.0.1", server_port=3000
- )
- logger.success("Server started successfully")
+ #!/usr/bin/env python3
+ """
+ Lyrics Analyzer Agent - Main Entry Point
+
+ This module serves as the entry point for the Lyrics Analyzer application, which
+ uses a system of specialized agents to search for and analyze song lyrics.
+ """
+
+ from loguru import logger
+ from Gradio_UI import GradioUI
+ from smolagents import LiteLLMModel
+
+ from config import setup_logger, load_api_keys, get_model_id, GRADIO_CONFIG
+ from agents.manager_agent import create_manager_agent
+
+
+ def main():
+     """
+     Main function to initialize and run the Lyrics Analyzer Agent.
+
+     This function sets up logging, loads API keys, initializes the LLM model,
+     and starts the Gradio UI server with the manager agent.
+     """
+     # Setup logger and API keys
+     setup_logger()
+     load_api_keys()
+
+     # Initialize the LLM model based on configuration
+     model_id = get_model_id()
+     logger.info(f"Initializing with model: {model_id}")
+     model = LiteLLMModel(model_id=model_id)
+
+     # Create the manager agent which will create and manage the other agents
+     manager_agent = create_manager_agent(model)
+
+     # Start the Gradio UI server
+     logger.info("Initializing Gradio UI and launching server")
+     GradioUI(manager_agent).launch(
+         debug=GRADIO_CONFIG['debug'],
+         share=GRADIO_CONFIG['share'],
+         server_name=GRADIO_CONFIG['server_name'],
+         server_port=GRADIO_CONFIG['server_port']
+     )
+     logger.success("Server started successfully")
+
+
+ # Run the application when executed directly
+ if __name__ == "__main__":
+     main()
config.py ADDED
@@ -0,0 +1,86 @@
+ """
+ Configuration parameters for the Lyrics Analyzer Agent.
+
+ This module separates configuration from implementation,
+ making it easier to modify settings without changing code.
+ """
+
+ import os
+ import yaml
+ from loguru import logger
+
+ # Logger configuration
+ def setup_logger():
+     """Configure loguru logger with custom formatting."""
+     logger.remove()  # Remove default handlers
+     logger.add(
+         lambda msg: print(msg, end=""),
+         level="INFO",
+         format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{message}</cyan>"
+     )
+
+ # API configuration
+ def load_api_keys():
+     """Load API keys from environment variables."""
+     # Gemini API is the default
+     os.environ["GEMINI_API_KEY"] = os.getenv("GEMINI_API_KEY")
+
+     # Anthropic API for local testing
+     if use_anthropic():
+         os.environ["ANTHROPIC_API_KEY"] = os.getenv("ANTHROPIC_API_KEY")
+
+ # Default model configuration
+ def use_anthropic():
+     """Check if we should use Anthropic instead of Gemini."""
+     return os.getenv("USE_ANTHROPIC", "false").lower() == "true"
+
+ def get_model_id():
+     """Get the appropriate model ID based on configuration."""
+     if use_anthropic():
+         return "claude-3-haiku-20240307"
+     else:
+         return "gemini/gemini-2.0-flash"
+
+ # Load prompts from YAML
+ def load_prompt_templates():
+     """Load prompt templates from YAML file."""
+     try:
+         with open("prompts.yaml", 'r') as stream:
+             return yaml.safe_load(stream)
+     except (FileNotFoundError, yaml.YAMLError) as e:
+         logger.error(f"Error loading prompts.yaml: {e}")
+         return {}  # Return empty dict to avoid breaking the application
+
+ # Tool configuration
+ SEARCH_TOOL_CONFIG = {
+     "min_delay": 3.0,
+     "max_delay": 7.0
+ }
+
+ # Agent configuration
+ AGENT_CONFIG = {
+     "web_agent": {
+         "max_steps": 50,
+         "verbosity_level": 2,
+         "description": "Browses the web to find original full lyrics and scrape them. Excels at building effective search queries"
+     },
+     "analysis_agent": {
+         "max_steps": 5,
+         "verbosity_level": 2,
+         "description": "You are a Song Analysis Expert with deep knowledge of music theory, lyrical interpretation, cultural contexts, and music history. Your role is to analyze song lyrics to uncover their deeper meaning, artistic significance, and historical context."
+     },
+     "manager_agent": {
+         "max_steps": 15,
+         "verbosity_level": 2,
+         "planning_interval": 5,
+         "description": "Manages the search process and coordinates the search and analysis of song lyrics."
+     }
+ }
+
+ # Gradio UI configuration
+ GRADIO_CONFIG = {
+     "debug": True,
+     "share": True,
+     "server_name": "127.0.0.1",
+     "server_port": 3000
+ }
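
The settings above are plain module-level constants, so callers simply import and index them. A brief consumption sketch, assuming the tools package from this commit is importable; note that the agent factories currently pass their delays as literals, so the SEARCH_TOOL_CONFIG unpacking here is only an illustration.

from config import AGENT_CONFIG, GRADIO_CONFIG, SEARCH_TOOL_CONFIG, get_model_id
from tools.search_tools import ThrottledDuckDuckGoSearchTool

# Per-agent settings are looked up by key, exactly as the factories do.
web_cfg = AGENT_CONFIG["web_agent"]
print(get_model_id(), web_cfg["max_steps"], GRADIO_CONFIG["server_port"])

# SEARCH_TOOL_CONFIG mirrors the tool's keyword arguments, so it can be unpacked directly.
search_tool = ThrottledDuckDuckGoSearchTool(**SEARCH_TOOL_CONFIG)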
tools/__init__.py ADDED
@@ -0,0 +1,3 @@
+ """
+ Tools for interacting with search engines and analyzing song lyrics.
+ """
tools/analysis_tools.py ADDED
@@ -0,0 +1,44 @@
+ """
+ Analysis tools for understanding and interpreting song lyrics.
+ """
+
+ import os
+ from loguru import logger
+ from smolagents import tool
+
+ from api_utils import make_api_call_with_retry
+
+ @tool
+ def analyze_lyrics_tool(song_title: str, artist: str, lyrics: str) -> str:
+     """
+     Performs a deep analysis of the musical track, given its metadata.
+
+     Args:
+         song_title: Title of the song or music track.
+         artist: The name of the artist.
+         lyrics: The lyrics of the song.
+
+     Returns:
+         A summary of the song's meaning in English.
+     """
+
+     prompt = f"""You are an expert in songs and their meanings.
+     Summarize the meaning of {song_title} by {artist} and identify
+     key themes based on the lyrics:
+     {lyrics}.
+
+     Include deep idea and vibes analysis with explanations
+     based on references to the exact lines.
+     """
+
+     # Determine which model to use based on configuration
+     if os.getenv("USE_ANTHROPIC", "false").lower() == "true":
+         model_to_use = "claude-3-haiku-20240307"
+         logger.info("Using Anthropic model: {} for lyrics analysis", model_to_use)
+     else:
+         model_to_use = "gemini/gemini-2.0-flash"
+         logger.info("Using Gemini model: {} for lyrics analysis", model_to_use)
+
+     # Use the function with retry mechanism
+     logger.info("Analyzing lyrics for song: '{}' by '{}'", song_title, artist)
+     return make_api_call_with_retry(model_to_use, prompt)
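
Since @tool wraps the function in a smolagents Tool, it can also be exercised directly, outside any agent. A hedged sketch with placeholder inputs follows; it assumes smolagents tool instances are callable with keyword arguments and that the relevant API key is set in the environment.

from tools.analysis_tools import analyze_lyrics_tool

# Placeholder metadata; in the app these values come from the lyrics_search_agent.
analysis = analyze_lyrics_tool(
    song_title="Imagine",
    artist="John Lennon",
    lyrics="Imagine there's no heaven...",
)
print(analysis)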
tools/search_tools.py ADDED
@@ -0,0 +1,95 @@
+ """
+ Search tools for finding song lyrics and related information.
+ """
+
+ import time
+ import random
+ from typing import Dict, List, Any
+ from loguru import logger
+ from smolagents import DuckDuckGoSearchTool, Tool
+
+ class ThrottledDuckDuckGoSearchTool(DuckDuckGoSearchTool):
+     """
+     A wrapper around DuckDuckGoSearchTool that adds a delay between requests
+     to avoid rate limiting issues.
+
+     This tool implements a delay mechanism to prevent hitting DuckDuckGo's rate limits.
+     Each search request will be followed by a random delay within the specified range.
+     """
+
+     def __init__(self, min_delay: float = 2.0, max_delay: float = 5.0, **kwargs):
+         """
+         Initialize the throttled search tool with delay parameters.
+
+         Args:
+             min_delay: Minimum delay in seconds between requests (default: 2.0)
+             max_delay: Maximum delay in seconds between requests (default: 5.0)
+             **kwargs: Additional arguments to pass to DuckDuckGoSearchTool
+         """
+         super().__init__(**kwargs)
+         self.min_delay = min_delay
+         self.max_delay = max_delay
+         self.name = "search"  # Keep the same name as the parent class
+         logger.info(f"Initialized ThrottledDuckDuckGoSearchTool with delay range: {min_delay}-{max_delay}s")
+
+     def forward(self, query: str) -> List[Dict[str, Any]]:
+         """
+         Execute a search with a delay to avoid rate limiting.
+
+         Args:
+             query: The search query string
+
+         Returns:
+             List of search results
+         """
+         # Add a random delay before the search to avoid rate limiting
+         delay = random.uniform(self.min_delay, self.max_delay)
+         logger.info(f"Throttling DuckDuckGo search for {delay:.2f} seconds before query: '{query}'")
+         time.sleep(delay)
+
+         # Call the parent class implementation
+         try:
+             results = super().forward(query)
+             # Add another delay after the search to ensure spacing between requests
+             time.sleep(random.uniform(self.min_delay / 2, self.max_delay / 2))
+             return results
+         except Exception as e:
+             logger.error(f"Error in DuckDuckGo search: {str(e)}")
+             # Return empty results on error to allow the agent to continue
+             return [{"title": "Search error", "href": "", "body": f"Error performing search: {str(e)}"}]
+
+
+ class LyricsSearchTool(Tool):
+     """
+     Uses web search to find song lyrics based on song title and artist name
+
+     The search query should include the song title and artist name. The tool
+     will return the lyrics of the song if found.
+
+     Parameters
+     ----------
+     query : str
+         The search query for finding song lyrics. Should include song title and artist name.
+
+     Returns
+     -------
+     str
+         The lyrics of the song if found, otherwise an empty string.
+     """
+     name = "lyrics_search_tool"
+     description = "Uses web search to find song lyrics based on song title and artist name"
+     inputs = {
+         "query": {
+             "type": "string",
+             "description": "The search query for finding song lyrics. Should include song title and artist name.",
+         }
+     }
+     output_type = "string"
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+     def forward(self, query: str) -> str:
+         assert isinstance(query, str), "Your search query must be a string"
+         # TODO: Implement lyrics search functionality
+         return "Lyrics search not implemented yet"