"""
Agent management for the leaderboard parser.
"""
import os
import datetime
import json
import requests
from smolagents import CodeAgent
from smolagents.models import HfApiModel
from src.agents.model_factory import get_model as get_default_model
from src.agents.browser import save_screenshot
from src.agents.tools import (
map_clickable_elements,
close_popups,
extract_table_data,
find_leaderboard_elements,
go_back,
search_item_ctrl_f,
copy_link_from_element,
validate_json_results,
find_model_links,
click_at_coordinates,
)
def load_model(model_type, model_id):
"""
Load a model by its type and ID.
Args:
model_type: The type of model to load
model_id: The ID of the model
Returns:
The loaded model
"""
if model_type == "HfApiModel":
return HfApiModel(model_id=model_id)
elif model_type == "LiteLLMModel":
from smolagents import LiteLLMModel
return LiteLLMModel(model_id=model_id)
else:
raise ValueError(f"Unknown model type: {model_type}")
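# Example usage (a sketch; the model IDs below are illustrative placeholders, not values required by this project):
#   hf_model = load_model("HfApiModel", "Qwen/Qwen2.5-Coder-32B-Instruct")
#   litellm_model = load_model("LiteLLMModel", "gpt-4o")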
def initialize_agent(model):
"""
Initialize an agent with the given model.
Args:
model: The model to use for the agent
Returns:
The initialized agent
"""
return CodeAgent(
tools=[go_back, map_clickable_elements, validate_json_results, close_popups, search_item_ctrl_f, extract_table_data, find_leaderboard_elements, copy_link_from_element, find_model_links, click_at_coordinates],
model=model,
additional_authorized_imports=["selenium", "helium", "time", "json", "re", "src.agents.browser"],
step_callbacks=[save_screenshot],
max_steps=25,
verbosity_level=2,
)
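# Example usage (a sketch; assumes a model obtained via load_model above):
#   model = load_model("HfApiModel", "Qwen/Qwen2.5-Coder-32B-Instruct")
#   agent = initialize_agent(model)
#   agent.run("Visit <leaderboard URL> and extract the top models")  # drives the browser tools listed above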
# Instructions for the agent
leaderboard_instructions = """
Your task is to extract the three BEST models from the leaderboard. It is crucial that you identify the models that are at the top of the ranking, not just any three models present on the page.
You must also identify the main criterion on which the models are evaluated (for example: accuracy, speed, performance on a specific benchmark, etc.). Formulate a short description (less than 60 words) that explains what the models are judged on.
For each model, try to find a link to its page or repository. This can be any link (GitHub, Hugging Face, model website, etc.). If you cannot find a link for a model, indicate null for this field.
IMPORTANT: If you fail to clearly identify the top three models AND the evaluation criterion, the leaderboard will be rejected. It is essential that you provide this information accurately and completely.
You can use helium to navigate the website. We have already executed "from helium import *".
You can go to pages with:
```py
go_to('url')
```<end_code>
You can click on clickable elements by entering the text that appears on them:
```py
click("Button text")
```<end_code>
If it's a link:
```py
click(Link("Link text"))
```<end_code>
To scroll up or down, use scroll_down or scroll_up with the number of pixels as an argument:
```py
scroll_down(num_pixels=1200) # This will scroll down one view
```<end_code>
To close popups with an X icon, use the built-in tool `close_popups`:
```py
close_popups()
```<end_code>
You can use .exists() to check for the existence of an element:
```py
if Text('Accept cookies?').exists():
click('I accept')
```<end_code>
If you encounter situations where you cannot click on elements using text, you can use click_at_coordinates to click at specific x,y coordinates on the page:
```py
click_at_coordinates(x=500, y=300) # Click at the position 500px from left, 300px from top
```<end_code>
If pages seem stuck while loading, you may need to wait:
```py
import time
time.sleep(20.0) # Wait 20 seconds for the initial page load
```<end_code>
To extract data from a table, use the extract_table_data tool:
```py
table_info = extract_table_data()
print(table_info)
```<end_code>
If you cannot easily find a standard table, use find_leaderboard_elements to search for elements that might contain ranking data:
```py
leaderboard_elements = find_leaderboard_elements()
print(leaderboard_elements)
```<end_code>
RECOMMENDED METHODS FOR FINDING MODEL LINKS:
```py
# For a model named "BERT-Large"
model_name = "BERT-Large"
links_info = find_model_links(model_name)
print(links_info)
# If links were found, the best candidate is displayed at the end of the result
if "Best candidate for" in links_info:
# Extract the URL of the best candidate
best_url_line = links_info.split("Best candidate for")[1].split("\\n")[1]
url = best_url_line.replace("URL:", "").strip()
print(f"URL for model {model_name}: {url}")
else:
print(f"No link found for model {model_name}")
url = None
```<end_code>
IMPORTANT: If none of the methods can find a URL, do NOT try other methods such as extracting URLs from the source code. Simply use null for the model URL. It is better to have a missing URL (null) than an incorrect or irrelevant URL.
IMPORTANT - PAGE EXPLORATION ORDER:
If you don't immediately see the leaderboard table or ranking information, STRICTLY follow this order:
1. ABSOLUTE PRIORITY:
Look for and click on buttons, tabs, or links with text like "Leaderboard", "Results", "Ranking", "Benchmark", "Scores", "Evaluation", etc.
Examine ALL visible buttons and tabs before moving to the next step.
IMPORTANT: Be flexible with text matching! Some elements may contain emojis or other characters before/after the keywords.
```py
# Examples of searching for leaderboard buttons/tabs
import time
found_exact = False
for text in ["🏆 Leaderboard", "Leaderboard", "Results", "Ranking", "Benchmark", "Scores", "Evaluation", "Performance"]:
    if Button(text).exists() or Link(text).exists() or Text(text).exists():
        print(f"Found clickable element: {text}")
        click(text)
        time.sleep(5)  # Wait for the page to update
        found_exact = True
        break
# If exact matches fail, try more flexible matching
# This is crucial for elements with emojis or other characters
if not found_exact:
print("No exact matches found. Trying flexible text matching...")
import time
from src.agents.browser import driver
from selenium.webdriver.common.by import By
for text in ["🏆 Leaderboard", "Leaderboard", "Results", "Ranking", "Benchmark", "Scores"]:
# Try to find elements CONTAINING the text (not exact match)
matching_elements = driver.find_elements(By.XPATH, f"//*[contains(text(), '{text}')]")
if matching_elements:
print(f"Found {len(matching_elements)} elements containing '{text}'")
for element in matching_elements[:3]: # Try first three matches
try:
element_text = element.text
print(f"Element text: '{element_text}'")
driver.execute_script("arguments[0].scrollIntoView(true);", element)
time.sleep(1)
element.click()
print(f"Successfully clicked on element with text: '{element_text}'")
time.sleep(5)
break
except Exception as e:
print(f"Could not click: {e}")
# Try JavaScript click as fallback
try:
driver.execute_script("arguments[0].click();", element)
print(f"Clicked using JavaScript on element with text: '{element_text}'")
time.sleep(5)
break
except:
continue
```<end_code>
2. ONLY AFTER checking all buttons and tabs, scroll down to see if the content is lower down:
```py
scroll_down(1200) # Try scrolling to see more content
```<end_code>
3. Check whether there are dropdown menus or filters to activate (see the sketch after this list)
4. Explore the different sections of the page
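For step 3, here is a minimal sketch of activating a dropdown with helium's select(); the "Benchmark" label and the "Overall" value are placeholders to adapt to what the page actually shows:
```py
import time
# "Benchmark" is a placeholder label; replace it with the dropdown visible on the page
if ComboBox("Benchmark").exists():
    select(ComboBox("Benchmark"), "Overall")  # pick the option that reveals the ranking
    time.sleep(5)  # wait for the page to update
```<end_code>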
Proceed step by step:
1. Navigate to the provided URL
2. Wait for the page to load completely (use time.sleep(20.0))
3. EXPLORE the page by STRICTLY following the order above (first buttons/tabs, then scroll if necessary)
4. Look for the table or section containing the model ranking
5. Identify the three BEST models (those at the top of the ranking) (DO NOT CHANGE MODEL NAMES UNDER ANY CIRCUMSTANCES)
6. Determine the main evaluation criterion for the models
7. IMPORTANT: For each identified model, use the method described above to find its URL. If the URL cannot be found, use null.
8. If you do not find the links on the first try, you may retry with the same method.
9. Validate the results using the validate_json_results tool. VERY IMPORTANT: do this BEFORE sending the results (a sketch of a possible call appears after the final-answer example below).
10. Send final results
```py
final_answer({
"top_models": [
{"rank": 1, "name": "Model name 1", "url": "Model URL or null if not available"},
{"rank": 2, "name": "Model name 2", "url": "Model URL or null if not available"},
{"rank": 3, "name": "Model name 3", "url": "Model URL or null if not available"}
],
"evaluation_criteria": "Short description of the evaluation criterion (less than 60 words)"
})
```<end_code>
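The exact signature of validate_json_results is defined in src.agents.tools; the sketch below assumes it accepts the results dictionary and returns a validation report (adjust the call if the tool expects something else):
```py
results = {
    "top_models": [
        {"rank": 1, "name": "Model name 1", "url": None},
        {"rank": 2, "name": "Model name 2", "url": None},
        {"rank": 3, "name": "Model name 3", "url": None}
    ],
    "evaluation_criteria": "Short description of the evaluation criterion"
}
validation_report = validate_json_results(results)  # assumed call; check the tool's docstring
print(validation_report)
```<end_code>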
After each block of code you write, you will automatically receive an updated screenshot of the browser and its current URL.
Be careful: the screenshot is taken only at the end of the complete action; it does not capture intermediate states.
IMPORTANT: DO NOT CHANGE MODEL NAMES UNDER ANY CIRCUMSTANCES
"""
def validate_results(result):
"""Checks that the results do not contain generic placeholders."""
if not result or not isinstance(result, dict):
return False, "Invalid result"
if "top_models" not in result or len(result.get("top_models", [])) < 3:
return False, "Less than 3 models found"
# Check for generic names
generic_names = ["model a", "model b", "model c", "model 1", "model 2", "model 3", "model name", "unavailable"]
model_names = [m.get("name", "").lower() for m in result.get("top_models", [])]
if any(name in generic_names for name in model_names):
return False, "Generic model names detected"
# Check for generic URLs
generic_urls = ["example.com", "example.org"]
model_urls = [m.get("url", "").lower() for m in result.get("top_models", []) if m.get("url") is not None]
if any(generic in url for url in model_urls for generic in generic_urls):
return False, "Generic URLs detected"
# Check the evaluation criterion
if "evaluation_criteria" not in result or len(result.get("evaluation_criteria", "")) < 10:
return False, "Evaluation criterion missing or too short"
return True, "Valid results"
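# Example (illustrative): a result with too few entries fails validation.
#   validate_results({"top_models": [], "evaluation_criteria": "accuracy"})
#   -> (False, "Fewer than 3 models found")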
def process_leaderboard(url, model, index, uid=None, additional_rules=None):
"""
Process a single leaderboard URL and return the results.
Args:
url: The URL of the leaderboard to process
model: The LLM model to use
index: The index of the leaderboard in the list
uid: The UID of the leaderboard (for saving screenshots)
additional_rules: Additional rules specific to this leaderboard
Returns:
A dictionary with the results or error information
"""
from src.agents.browser import initialize_driver, close_driver
print(f"\n\n{'='*50}")
print(f"Processing leaderboard {index+1}: {url}")
if uid:
print(f"UID: {uid}")
if additional_rules:
print(f"Additional rules: {additional_rules}")
print(f"{'='*50}\n")
# Get current date and time
now = datetime.datetime.now()
parsed_at = now.isoformat()
# Check whether the model is None
if model is None:
return {
"results": None,
"parsing_status": "error",
"parsing_message": "Model initialization failed - check HUGGING_FACE_INFERENCE_ENDPOINT_URL and HUGGING_FACE_HUB_TOKEN",
"parsed_at": parsed_at
}
initialize_driver()
agent = initialize_agent(model)
# Create the prompt with the target URL
prompt = f"Visit {url} and extract the three BEST models from the leaderboard (those at the top of the ranking). Also identify the main evaluation criterion for the models and look for links associated with the models."
# Add additional rules if provided
instructions = leaderboard_instructions
if additional_rules:
instructions = f"""
ADDITIONAL RULES SPECIFIC TO THIS LEADERBOARD:
{additional_rules}
{leaderboard_instructions}
ADDITIONAL RULES SPECIFIC TO THIS LEADERBOARD:
{additional_rules}
"""
try:
# Run the agent with the provided prompt
agent.python_executor("from helium import *")
result = agent.run(prompt + instructions)
print(f"\nResult for {url}:")
print(result)
# Check if the result is None or empty
if not result:
return {
"results": None,
"parsing_status": "error",
"parsing_message": "Empty result from agent",
"parsed_at": parsed_at
}
# Validate the results
is_valid, reason = validate_results(result)
if not is_valid:
print(f"WARNING: {reason}")
return {
"results": result,
"parsing_status": "invalid",
"parsing_message": reason,
"parsed_at": parsed_at
}
# Make sure the response is in the correct format
if not isinstance(result, dict) or "top_models" not in result:
print("WARNING: Agent did not use final_answer() correctly")
return {
"results": None,
"parsing_status": "error",
"parsing_message": "Agent returned improperly formatted response (did not use final_answer correctly)",
"parsed_at": parsed_at
}
return {
"results": result,
"parsing_status": "success",
"parsed_at": parsed_at
}
except Exception as e:
print(f"An error occurred while processing {url}: {e}")
return {
"results": None,
"parsing_status": "error",
"parsing_message": str(e),
"parsed_at": parsed_at
}
finally:
# Ensure browser is closed
close_driver()
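# Example invocation (a sketch; the URL and model ID are illustrative placeholders):
#   model = load_model("HfApiModel", "Qwen/Qwen2.5-Coder-32B-Instruct")
#   outcome = process_leaderboard("https://huggingface.co/spaces/<org>/<leaderboard>", model, index=0)
#   print(outcome["parsing_status"], outcome.get("parsing_message"))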