from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
import nltk
import networkx as nx
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.tokenize import sent_tokenize

# Ensure necessary NLTK resources are downloaded
nltk.download('punkt_tab')
nltk.download('punkt')

from Gradio_UI import GradioUI

# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
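
# A quick sanity check (a sketch, commented out so it does not run on Space startup):
# calling the tool directly before wiring it into the agent. "Europe/Paris" is just an
# example zone name; the second call exercises the error branch.
# print(get_current_time_in_timezone("Europe/Paris"))
# print(get_current_time_in_timezone("not/a_zone"))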


@tool
def extract_sent(doc: str, top_n: int = 3) -> str:
    """A tool that extracts the key sentences from a document using TextRank.
    Args:
        doc: The document (e.g., an abstract) to extract sentences from.
        top_n: The number of top-ranked sentences to return.
    """
    try:
        # Step 1: Tokenize the document into sentences
        sentences = sent_tokenize(doc)

        # Step 2: Convert sentences to vector representations (TF-IDF)
        vectorizer = TfidfVectorizer()
        sentence_vectors = vectorizer.fit_transform(sentences)

        # Step 3: Compute similarity matrix (cosine similarity)
        similarity_matrix = cosine_similarity(sentence_vectors)

        # Step 4: Create a graph where nodes are sentences, and edges are similarities
        sentence_graph = nx.from_numpy_array(similarity_matrix)

        # Step 5: Apply PageRank algorithm to rank sentences
        scores = nx.pagerank(sentence_graph)

        # Step 6: Sort sentences by score and return top-N sentences
        ranked_sentences = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
        extracted_sentences = [s for _, s in ranked_sentences[:top_n]]

        return "The extracted sentences are:\n" + "\n".join(extracted_sentences)
    
    except Exception as e:
        print(f"Error in extract_sent: {e}")
        return f"Error extracting key sentences: {e}"
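
# Hedged usage sketch (hypothetical sample text, commented out so the Space still launches
# cleanly): any multi-sentence string works as `doc`; with top_n=2 the two highest-ranked
# sentences are returned, joined by newlines.
# sample_doc = (
#     "TextRank builds a graph over sentences. "
#     "Edges are weighted by the cosine similarity of TF-IDF vectors. "
#     "PageRank then scores each sentence by its centrality in that graph."
# )
# print(extract_sent(sample_doc, top_n=2))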


final_answer = FinalAnswerTool()

# If the agent does not answer, the model may be overloaded. Use another model, or the following
# Hugging Face Endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'

model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # it is possible that this model may be overloaded
    custom_role_conversions=None,
)
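
# Sketch of switching to the fallback endpoint mentioned above (same parameters,
# only model_id changes); swap it in if the hosted model is overloaded.
# model = HfApiModel(
#     max_tokens=2096,
#     temperature=0.5,
#     model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
#     custom_role_conversions=None,
# )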


# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
    
agent = CodeAgent(
    model=model,
    tools=[get_current_time_in_timezone, image_generation_tool, extract_sent, final_answer],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)
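
# Optional sanity check before launching the UI (a sketch; the prompt is just an example):
# uncomment to run the agent once from the command line and inspect its tool calls.
# print(agent.run("What time is it in America/New_York right now?"))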


GradioUI(agent).launch()