from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import requests
import yaml
from tools.final_answer import FinalAnswerTool

from Gradio_UI import GradioUI

# from huggingface_hub import login
# login()
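
# Note: when a token is required, HfApiModel is expected to fall back on the token cached by
# `huggingface-cli login` or the HF_TOKEN environment variable (an assumption about
# huggingface_hub's usual token resolution), so the explicit login() above stays optional.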


@tool
def lookup_wikipedia_page(search_query: str) -> str:
    """Looks up the exact Wikipedia page title for a given search query, so the exact title can be passed to the get_wikipedia_views tool.

    Args:
        search_query: The search term to find the Wikipedia page for
    """
    try:
        # Use Wikipedia's API to search for pages
        url = "https://en.wikipedia.org/w/api.php"
        params = {
            "action": "query",
            "format": "json",
            "list": "search",
            "srsearch": search_query,
            "srlimit": 1,
        }
        response = requests.get(url, params=params)
        if response.status_code == 200:
            data = response.json()
            if data["query"]["search"]:
                # Return the exact page title
                return data["query"]["search"][0]["title"]
            else:
                return f"No Wikipedia pages found for '{search_query}'"
        else:
            return f"Error searching Wikipedia: {response.status_code}"
    except Exception as e:
        return f"Error processing request: {str(e)}"


@tool
def get_wikipedia_views(article_title: str, window_size_in_days: int) -> str:
    """Fetches view statistics for a Wikipedia article. The article title must be the exact page title returned by the lookup_wikipedia_page tool.

    Args:
        article_title: The title of the Wikipedia article to get views for
        window_size_in_days: The number of days to get views for
    """
    try:
        # Wikipedia page titles use underscores instead of spaces
        article_title = article_title.replace(' ', '_')
        # Query the article's pageviews property directly
        url = "https://en.wikipedia.org/w/api.php"
        params = {
            "action": "query",
            "format": "json",
            "titles": article_title,
            "prop": "pageviews",
            "pvipdays": window_size_in_days,
        }
        response = requests.get(url, params=params)
        if response.status_code != 200:
            return f"Error querying Wikipedia: {response.status_code}"
        data = response.json()
        # Check if pages data exists
        if "pages" not in data["query"]:
            return f"Could not find page '{article_title}'"
        # Get the first (and only) page's data
        page_data = next(iter(data["query"]["pages"].values()))
        if "pageviews" not in page_data:
            return f"No pageview data available for '{article_title}'"
        # Sum the daily counts, skipping days with no data
        total_views = sum(views for views in page_data["pageviews"].values() if views is not None)
        return f"The article '{article_title}' had {total_views} views in the last {window_size_in_days} days"
    except Exception as e:
        return f"Error processing request: {str(e)}"

final_answer = FinalAnswerTool()

# If the agent does not answer, the model may be overloaded; switch to another model or
# to the following Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # this model may be overloaded at times
    custom_role_conversions=None,
)

# Import a text-to-image tool from the Hub (loaded here; append it to the agent's tools list below to use it)
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
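
# prompts.yaml is assumed to follow the agents-course template format: a mapping of
# template names (such as "system_prompt") to template strings consumed by CodeAgent.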

agent = CodeAgent(
    model=model,
    tools=[final_answer, lookup_wikipedia_page, get_wikipedia_views],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

GradioUI(agent).launch()