reichaves committed (verified)
Commit 4d98ee1 · 1 Parent(s): ae7a494

Update app.py

Files changed (1):
  app.py  +78 -31
app.py CHANGED
@@ -1,4 +1,4 @@
-from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
+from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
 import datetime
 import requests
 import pytz
@@ -7,17 +7,7 @@ from tools.final_answer import FinalAnswerTool
 
 from Gradio_UI import GradioUI
 
-# Below is an example of a tool that does nothing. Amaze us with your creativity !
-@tool
-def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
-    #Keep this format for the description / args / args description but feel free to modify the tool
-    """A tool that does nothing yet
-    Args:
-        arg1: the first argument
-        arg2: the second argument
-    """
-    return "What magic will you build ?"
-
+# 1 - Timezone tool
 @tool
 def get_current_time_in_timezone(timezone: str) -> str:
     """A tool that fetches the current local time in a specified timezone.
@@ -25,45 +15,102 @@ def get_current_time_in_timezone(timezone: str) -> str:
         timezone: A string representing a valid timezone (e.g., 'America/New_York').
     """
     try:
-        # Create timezone object
+        # Create timezone object using pytz library
         tz = pytz.timezone(timezone)
-        # Get current time in that timezone
+
+        # Get current time in that timezone and format it as a readable string
         local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
+
+        # Return formatted response with the timezone and current time
         return f"The current local time in {timezone} is: {local_time}"
     except Exception as e:
+        # Handle any errors that might occur (invalid timezone, etc.)
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
 
 
+# 2 - Image generation
+@tool
+def generate_image_from_text(prompt: str) -> str:
+    """A tool that generates an image based on a text description.
+    Args:
+        prompt: A detailed text description of the image you want to generate.
+    """
+    try:
+        # Call the image generation tool loaded from Hugging Face Hub
+        # The tool is loaded further down in the script before it's used here
+        result = image_generation_tool(prompt)
+
+        # Return success message with the result (which should contain image URL or path)
+        return f"Image generated successfully: {result}"
+    except Exception as e:
+        # Handle any errors that occur during image generation
+        return f"Error generating image: {str(e)}"
+
+# 3 - Web search
+# Initialize the DuckDuckGo search tool
+search_tool = DuckDuckGoSearchTool()
+
+@tool
+def search_web(query: str) -> str:
+    """A tool that searches the web using DuckDuckGo for information.
+    Args:
+        query: The search query to find information on the web.
+    """
+    try:
+        # Execute the search query using DuckDuckGo
+        search_results = search_tool(query)
+
+        # Format and return the search results
+        return f"Search results for '{query}':\n\n{search_results}"
+    except Exception as e:
+        # Handle any errors that occur during the search
+        return f"Error searching the web: {str(e)}"
+
+
+# This tool is required for the agent to provide final answers
 final_answer = FinalAnswerTool()
 
-# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
-# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
 
+# Model configuration
+# If the agent does not answer, the model is overloaded
+# Alternative endpoint: 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
 model = HfApiModel(
-    max_tokens=2096,
-    temperature=0.5,
-    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
-    custom_role_conversions=None,
+    max_tokens=2096,  # Maximum number of tokens in the response
+    temperature=0.5,  # Controls randomness: lower = more deterministic
+    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # Using Qwen 2.5 Coder model
+    custom_role_conversions=None,
 )
 
-
-# Import tool from Hub
+# Load External Tools
+# Import the image generation tool from Hugging Face Hub
+# This tool will be used by the generate_image_from_text function
 image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
+
+# Load Prompt Templates
+# Load prompt templates from YAML file for consistent agent responses
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
 
+
+# Agent Configuration
 agent = CodeAgent(
     model=model,
-    tools=[final_answer], ## add your tools here (don't remove final answer)
-    max_steps=6,
-    verbosity_level=1,
-    grammar=None,
-    planning_interval=None,
-    name=None,
-    description=None,
-    prompt_templates=prompt_templates
+    tools=[
+        get_current_time_in_timezone,  # Tool 1: Time zone tool
+        generate_image_from_text,      # Tool 2: Image generation tool
+        search_web,                    # Tool 3: Web search tool
+        final_answer                   # Required final answer tool
+    ],
+    max_steps=6,                       # Maximum number of reasoning steps
+    verbosity_level=1,                 # Level of detail in agent's output
+    grammar=None,                      # No specific grammar constraints
+    planning_interval=None,            # No specific planning interval
+    name=None,                         # No custom agent name
+    description=None,                  # No custom agent description
+    prompt_templates=prompt_templates  # Using loaded prompt templates
 )
 
-
+# Launch GRADIO UI
+# Start the Gradio interface with our configured agent
 GradioUI(agent).launch()
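
A quick way to sanity-check the timezone tool added in this commit is to exercise its formatting logic outside the agent, using only pytz and datetime (both already imported in app.py). This is a minimal sketch, not part of the commit; the helper name check_timezone_formatting and the example timezone strings are arbitrary.

import datetime
import pytz

def check_timezone_formatting(timezone: str) -> str:
    # Mirrors the body of get_current_time_in_timezone from app.py
    try:
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"

# Example timezones chosen for illustration only
print(check_timezone_formatting("America/Sao_Paulo"))   # happy path
print(check_timezone_formatting("Not/A_Timezone"))      # exercises the error branch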
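
The commit keeps GradioUI(agent).launch() as the entry point, but the same configured agent can also be driven programmatically with agent.run(), the standard smolagents call for a single task. A minimal sketch, assuming app.py were refactored so importing it does not immediately launch the Gradio UI (for example by placing the launch call under an if __name__ == "__main__": guard); the prompt below is an arbitrary example.

# Assumes the launch call in app.py is guarded, e.g.:
#     if __name__ == "__main__":
#         GradioUI(agent).launch()
from app import agent  # hypothetical import; only works with the guard above

# A query that should route through the new timezone tool
result = agent.run("What is the current local time in America/New_York?")
print(result)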