|
from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool,LiteLLMModel, TransformersModel |
|
import datetime |
|
import os |
|
import requests |
|
import pytz |
|
import yaml |
|
from tools.final_answer import FinalAnswerTool |
|
from tools.visit_webpage import VisitWebpageTool |
|
from tools.web_search import DuckDuckGoSearchTool |
|
from tools.text_to_image import TextToImageTool |
|
|
|
from Gradio_UI import GradioUI |
|
|
|
from huggingface_hub import login |
|
|
|
''' |
|
# Below is an example of a tool that does nothing. |
|
@tool |
|
def my_custom_tool(arg1:str, arg2:int)-> str: #it's important to specify the return type
|
#Keep this format for the description / args / args description but feel free to modify the tool |
|
"""A tool that does nothing yet |
|
Args: |
|
arg1: the first argument |
|
arg2: the second argument |
|
""" |
|
return "What magic will you build ?" |
|
''' |
|
|
|
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current date and local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    # Best-effort tool: any failure (unknown zone name, etc.) is reported back
    # to the agent as a string rather than raised, so the agent can recover.
    try:
        now = datetime.datetime.now(pytz.timezone(timezone))
        formatted = now.strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {formatted}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
|
|
|
# Authenticate with the Hugging Face Hub using the token from the environment.
# Guard on the token being present: `login(token=None)` falls back to cached
# credentials or an interactive prompt, which would hang a headless deployment.
hf_api_key = os.getenv("HF_API_KEY")
if hf_api_key:
    login(token=hf_api_key)
|
|
|
# Instantiate the agent's toolbox (all local tool classes from tools/).
final_answer = FinalAnswerTool()          # lets the agent emit its final reply
web_search = DuckDuckGoSearchTool()       # DuckDuckGo text search
visit_webpage = VisitWebpageTool()        # fetch and read a web page
image_generation_tool = TextToImageTool() # text-to-image generation
|
|
|
''' |
|
model = HfApiModel( |
|
max_tokens=2096, |
|
temperature=0.5, |
|
model_id='https://wxknx1kg971u7k1n.us-east-1.aws.endpoints.huggingface.cloud',# it is possible that this model may be overloaded |
|
custom_role_conversions=None, |
|
) |
|
|
|
model = TransformersModel( |
|
model_id="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", |
|
device_map="auto", |
|
torch_dtype="auto", |
|
max_new_tokens=2096, |
|
temperature=0.5, |
|
) |
|
''' |
|
# Gemini 2.0 Flash accessed through LiteLLM; the API key is read from the
# environment so no secret is hard-coded in the source.
litellm_api_key = os.getenv("LITELLM_API_KEY")
model = LiteLLMModel(
    model_id="gemini/gemini-2.0-flash-exp",
    max_tokens=2096,
    temperature=0.6,
    api_key=litellm_api_key,
)
|
|
|
with open("prompts.yaml", 'r') as stream: |
|
prompt_templates = yaml.safe_load(stream) |
|
|
|
# Assemble the code agent: the LLM, its toolbox, and the prompt templates.
agent_tools = [
    get_current_time_in_timezone,
    visit_webpage,
    web_search,
    image_generation_tool,
    final_answer,
]
agent = CodeAgent(
    model=model,
    tools=agent_tools,
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
|
|
|
|
|
# Wrap the agent in the Gradio chat interface and start serving it.
ui = GradioUI(agent)
ui.launch()