from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
import nltk
import networkx as nx
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.tokenize import sent_tokenize
# Ensure necessary NLTK resources are downloaded
nltk.download('punkt_tab')
nltk.download('punkt')
from Gradio_UI import GradioUI
# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description but feel free to modify the tool
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
@tool
def extract_sent(doc: str, top_n: int = 3) -> str:
    """A tool that extracts key sentences from a document using TextRank.
    Args:
        doc: The document (e.g., abstract) to extract sentences from.
        top_n: The number of top-ranked sentences to return.
    """
    try:
        # Step 1: Tokenize the document into sentences
        sentences = sent_tokenize(doc)
        # Step 2: Convert sentences to vector representations (TF-IDF)
        vectorizer = TfidfVectorizer()
        sentence_vectors = vectorizer.fit_transform(sentences)
        # Step 3: Compute similarity matrix (cosine similarity)
        similarity_matrix = cosine_similarity(sentence_vectors)
        # Step 4: Create a graph where nodes are sentences and edges are similarities
        sentence_graph = nx.from_numpy_array(similarity_matrix)
        # Step 5: Apply the PageRank algorithm to rank sentences
        scores = nx.pagerank(sentence_graph)
        # Step 6: Sort sentences by score and return the top-N sentences
        ranked_sentences = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
        extracted_sentences = [s for _, s in ranked_sentences[:top_n]]
        return "The extracted sentences are:\n" + "\n".join(extracted_sentences)
    except Exception as e:
        print(f"Error in extract_sent: {e}")
        return f"Error in extract_sent: {e}"
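# Quick local check for extract_sent (illustrative; the sample text below is made up).
# Uncomment to verify the TextRank extraction before wiring it into the agent:
# sample_abstract = (
#     "TextRank is a graph-based ranking algorithm. "
#     "It scores each sentence by its similarity to the other sentences. "
#     "The highest-scoring sentences are returned as an extractive summary."
# )
# print(extract_sent(sample_abstract, top_n=2))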
final_answer = FinalAnswerTool()
# If the agent does not answer, the model is overloaded; use another model or the following
# Hugging Face Endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # it is possible that this model may be overloaded
    custom_role_conversions=None,
)
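# Alternative sketch: if the serverless model above is overloaded, point HfApiModel at the
# dedicated endpoint mentioned in the comment above (uncomment to use instead of the model above):
# model = HfApiModel(
#     max_tokens=2096,
#     temperature=0.5,
#     model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
#     custom_role_conversions=None,
# )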
# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
agent = CodeAgent(
    model=model,
    tools=[get_current_time_in_timezone, image_generation_tool, extract_sent, final_answer],  ## add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
GradioUI(agent).launch()