Upload 6 files
- README.md +8 -7
- agents/__init__.py +0 -0
- agents/agent.py +70 -0
- api.py +185 -0
- app.py +104 -157
- requirements.txt +2 -2
README.md
CHANGED
@@ -1,14 +1,15 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Template Final Assignment
+emoji: 🕵🏻‍♂️
+colorFrom: indigo
+colorTo: indigo
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.25.2
 app_file: app.py
 pinned: false
-license: mit
 hf_oauth: true
+# optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
+hf_oauth_expiration_minutes: 480
 ---

-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
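For context, the `hf_oauth` keys added above are what let the Space show a Hugging Face login button and hand the visitor's profile to the app's handlers. A minimal sketch of that pattern follows (not part of this commit; the `whoami` handler is hypothetical and only mirrors how app.py below uses `gr.LoginButton` and an `gr.OAuthProfile`-typed parameter):

# Minimal sketch, assuming hf_oauth: true is set in the Space README as above.
# Gradio injects the OAuth profile into handlers that annotate a parameter with
# gr.OAuthProfile; the profile is None when the visitor is not logged in.
import gradio as gr

def whoami(profile: gr.OAuthProfile | None) -> str:
    return f"Logged in as {profile.username}" if profile else "Not logged in"

with gr.Blocks() as demo:
    gr.LoginButton()
    out = gr.Textbox(label="Status")
    gr.Button("Check login").click(fn=whoami, outputs=out)

if __name__ == "__main__":
    demo.launch()

Because the profile can be None, the handlers in app.py below accept `gr.OAuthProfile | None` and bail out with a "please log in" message when no user is available.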
agents/__init__.py
ADDED
File without changes
agents/agent.py
ADDED
@@ -0,0 +1,70 @@
import os
import time
import logging
from google import genai
from google.genai.types import GenerateContentConfig
from ratelimit import limits, sleep_and_retry

RPM = 15
TPM = 1_000_000
PER_MINUTE = 60
SYSTEM_PROMPT_GAIA = "You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string."

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S"
)
logger = logging.getLogger(__name__)

class BasicAgent:
    def __init__(self):
        logger.info("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        logger.info(f"Agent received question (first 50 chars): {question[:50]}...")
        fixed_answer = "This is a default answer."
        logger.info(f"Agent returning fixed answer: {fixed_answer}")
        return fixed_answer

class SimpleGeminiAgent(BasicAgent):
    def __init__(self, model="gemini-2.0-flash"):
        super().__init__()
        gemini_key = os.getenv("GEMINI_API_KEY")
        self.client = genai.Client(api_key=gemini_key)
        self.model = model
        logger.info("SimpleGeminiAgent initialized.")
        self.minute_start = time.time()
        self.tokens_this_minute = 0
        self.token_count = 0  # total tokens used across all calls

    @sleep_and_retry
    @limits(calls=RPM, period=PER_MINUTE)
    def __call__(self, question: str) -> str:
        # Reset the per-minute token window once a minute has elapsed.
        now = time.time()
        if now - self.minute_start >= 60:
            self.tokens_this_minute = 0
            self.minute_start = now

        # Enforce tokens per minute: if the budget is spent, sleep until the window resets.
        if self.tokens_this_minute >= TPM:
            sleep_time = max(0, 60 - (now - self.minute_start))
            time.sleep(sleep_time)
            self.tokens_this_minute = 0
            self.minute_start = time.time()

        response = self.client.models.generate_content(
            model=self.model,
            contents=question,
            config=GenerateContentConfig(system_instruction=SYSTEM_PROMPT_GAIA),
        )
        # Token usage is reported on response.usage_metadata, not on the response object itself.
        used_tokens = response.usage_metadata.total_token_count or 0
        self.tokens_this_minute += used_tokens
        self.token_count += used_tokens
        logger.info(f"SimpleGeminiAgent received question (first 50 chars): {question[:50]}...")
        logger.info(f"SimpleGeminiAgent returning answer: {response.text}")
        return response.text

if __name__ == "__main__":
    # Example usage
    agent = SimpleGeminiAgent()
    question = "What is the capital of France?"
    answer = agent(question)
    print(f"Question: {question}\nAnswer: {answer}")
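For reference, a minimal sketch (not part of the commit) of the behaviour the two ratelimit decorators above provide: `@limits(calls=RPM, period=PER_MINUTE)` raises once the call budget for the window is exhausted, and `@sleep_and_retry` turns that into a sleep until the window resets. The 3-calls-per-60-seconds limit here is chosen only for the demo.

# Sketch of the rate limiting relied on by SimpleGeminiAgent.__call__.
import time
from ratelimit import limits, sleep_and_retry

@sleep_and_retry
@limits(calls=3, period=60)
def ping() -> float:
    # Returns the timestamp at which the call was allowed through.
    return time.time()

if __name__ == "__main__":
    stamps = [ping() for _ in range(4)]
    # The 4th call is delayed until the 60-second window opened by the 1st call expires.
    print(f"Delay before 4th call: {stamps[3] - stamps[0]:.1f} s")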
api.py
ADDED
@@ -0,0 +1,185 @@
import os
import requests
import logging
import pandas as pd
import gradio as gr

# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S"
)
logger = logging.getLogger(__name__)

class GAIAHFAPIClient:
    """
    A client for interacting with the GAIA HF API for the final assignment.
    This client handles the API requests.
    """

    def __init__(self, profile: gr.OAuthProfile | None, api_base_url=DEFAULT_API_URL):
        # --- Determine HF Space Runtime URL and Repo URL ---
        space_id = os.getenv("SPACE_ID")  # SPACE_ID is used to build the link to the code
        # Record the logged-in username, if any; app.py includes it in the submission payload.
        if profile:
            self.username = f"{profile.username}"
            logger.info(f"User logged in: {self.username}")
        else:
            self.username = None
            logger.warning("User not logged in.")
        self.agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
        self.questions_url = f"{api_base_url}/questions"
        self.random_question_url = f"{api_base_url}/random-question"
        self.file_task_url = f"{api_base_url}/files/" + "{}"
        self.submit_url = f"{api_base_url}/submit"
        self.api_base_url = api_base_url

    def submit_answers(self, submission_data, results_log):
        try:
            response = requests.post(self.submit_url, json=submission_data, timeout=60)
            response.raise_for_status()
            result_data = response.json()
            final_status = (
                f"Submission Successful!\n"
                f"User: {result_data.get('username')}\n"
                f"Overall Score: {result_data.get('score', 'N/A')}% "
                f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
                f"Message: {result_data.get('message', 'No message received.')}"
            )
            print("Submission successful.")
            results_df = pd.DataFrame(results_log)
            return final_status, results_df
        except requests.exceptions.HTTPError as e:
            error_detail = f"Server responded with status {e.response.status_code}."
            try:
                error_json = e.response.json()
                error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
            except requests.exceptions.JSONDecodeError:
                error_detail += f" Response: {e.response.text[:500]}"
            status_message = f"Submission Failed: {error_detail}"
            print(status_message)
            results_df = pd.DataFrame(results_log)
            return status_message, results_df
        except requests.exceptions.Timeout:
            status_message = "Submission Failed: The request timed out."
            print(status_message)
            results_df = pd.DataFrame(results_log)
            return status_message, results_df
        except requests.exceptions.RequestException as e:
            status_message = f"Submission Failed: Network error - {e}"
            print(status_message)
            results_df = pd.DataFrame(results_log)
            return status_message, results_df
        except Exception as e:
            status_message = f"An unexpected error occurred during submission: {e}"
            print(status_message)
            results_df = pd.DataFrame(results_log)
            return status_message, results_df

    def get_questions(self):
        """
        Fetches the list of questions from the API.
        Returns:
            (questions, 'success') on success, or (error message, None) on failure.
        """
        logger.info(f"Fetching questions from: {self.questions_url}")
        try:
            response = requests.get(self.questions_url, timeout=15)
            response.raise_for_status()
            questions_data = response.json()
            if not questions_data:
                logger.warning("Fetched questions list is empty.")
                return "Fetched questions list is empty or invalid format.", None
            logger.info(f"Fetched {len(questions_data)} questions.")
            return questions_data, 'success'
        except requests.exceptions.RequestException as e:
            logger.error(f"Error fetching questions: {e}")
            return f"Error fetching questions: {e}", None
        except requests.exceptions.JSONDecodeError as e:
            logger.error(f"Error decoding JSON response from questions endpoint: {e}")
            logger.error(f"Response text: {response.text[:500]}")
            return f"Error decoding server response for questions: {e}", None
        except Exception as e:
            logger.error(f"An unexpected error occurred fetching questions: {e}")
            return f"An unexpected error occurred fetching questions: {e}", None

    def get_random_question(self):
        """
        Fetches a random question from the API.
        Returns:
            (question, None) on success, or (error message, None) on failure.
        """
        logger.info(f"Fetching a random question from: {self.random_question_url}")
        try:
            response = requests.get(self.random_question_url, timeout=15)
            response.raise_for_status()
            question_data = response.json()
            if not question_data:
                logger.warning("No random question data received.")
                return "No random question data received.", None
            logger.info(f"Received random question: {question_data.get('question', 'No question text')}")
            return question_data, None
        except requests.exceptions.RequestException as e:
            logger.error(f"Error fetching random question: {e}")
            return f"Error fetching random question: {e}", None

    def get_file_task(self, task_id):
        """
        Fetches the file attached to a task by its ID from the API.
        Args:
            task_id (str): The ID of the task to fetch.
        Returns:
            (file contents, None) on success, or (error message, None) on failure.
        """
        if not task_id:
            return "Task ID cannot be empty.", None
        _url = self.file_task_url.format(task_id)
        logger.info(f"Fetching file task from: {_url}")
        try:
            response = requests.get(_url, timeout=15)
            response.raise_for_status()

            # The response may be JSON, text, or bytes; inspect the Content-Type to decide.
            content_type = response.headers.get("Content-Type", "")
            if "application/json" in content_type:
                task_data = response.json()
            elif "text/" in content_type:
                task_data = response.text
            else:
                # Assume it's a binary file (e.g., CSV, image, etc.)
                task_data = response.content
            if not task_data:
                logger.warning(f"No data found for task ID: {task_id}")
                return f"No data found for task ID: {task_id}", None
            logger.info(f"Received file task for ID {task_id}: {task_data}")
            return task_data, None
        except requests.exceptions.RequestException as e:
            logger.error(f"Error fetching file task {task_id}: {e}")
            return f"Error fetching file task {task_id}: {e}", None

if __name__ == "__main__":
    # Example usage (run locally, without an OAuth profile)
    api_client = GAIAHFAPIClient(profile=None)
    random_question, error = api_client.get_random_question()
    if error:
        logger.error(f"Error fetching random question: {error}")
    else:
        logger.info(f"Random question: {random_question}")

    questions, error = api_client.get_questions()
    if not error:
        logger.error(f"Error fetching questions: {questions}")
    else:
        logger.info(f"Fetched questions: {questions}")

    task_id = 'f918266a-b3e0-4914-865d-4faa564f1aef'
    task_data, error = api_client.get_file_task(task_id)
    if error:
        logger.error(f"Error fetching file task {task_id}: {error}")
    else:
        logger.info(f"Fetched file task: {task_data}")
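A hypothetical local usage sketch for the client above (not part of the commit, and assuming no OAuth profile is available outside the Space). Note the return convention it illustrates: get_questions returns (questions, 'success') on success and (error message, None) on failure, which is what app.py below checks against.

# Hypothetical local usage of GAIAHFAPIClient.
from api import GAIAHFAPIClient

client = GAIAHFAPIClient(profile=None)       # no OAuth profile outside the Space
questions, status = client.get_questions()   # (list, 'success') or (error message, None)
if status is None:
    print(f"Fetch failed: {questions}")      # on failure the first element is the error message
else:
    print(f"Fetched {len(questions)} questions; first task_id: {questions[0].get('task_id')}")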
app.py
CHANGED
import os
import logging
import gradio as gr
import pandas as pd
from api import GAIAHFAPIClient
from agents.agent import BasicAgent, SimpleGeminiAgent

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S"
)
logger = logging.getLogger(__name__)

def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the agent on them, submits all answers,
    and displays the results.
    """
    api_client = GAIAHFAPIClient(profile=profile)  # Initialize the API client
    if api_client.username is None:
        # Submitting requires a logged-in Hugging Face user.
        return "Please Login to Hugging Face with the button.", None
    agent = SimpleGeminiAgent()

    # 2. Fetch Questions
    questions_data, error = api_client.get_questions()
    if error is None or questions_data is None:
        # get_questions signals failure by returning (error message, None).
        return questions_data, error

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    logger.info(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            logger.warning(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            logger.error(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        logger.warning("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": api_client.username.strip(), "agent_code": api_client.agent_code, "answers": answers_payload}
    logger.info(f"Agent finished. Submitting {len(answers_payload)} answers for user '{api_client.username}'...")

    # 5. Submit
    logger.info(f"Submitting {len(answers_payload)} answers to: {api_client.submit_url}")
    return api_client.submit_answers(submission_data, results_log)

def run_and_submit_one(profile: gr.OAuthProfile | None):
    """
    Fetches the first question, runs the agent on it, submits the answer,
    and displays the result.
    """
    api_client = GAIAHFAPIClient(profile=profile)  # Initialize the API client
    if api_client.username is None:
        # Submitting requires a logged-in Hugging Face user.
        return "Please Login to Hugging Face with the button.", None
    agent = SimpleGeminiAgent()

    # 2. Fetch Question
    questions_data, error = api_client.get_questions()
    if error is None or questions_data is None:
        return questions_data, error
    question_data = questions_data[0]

    # 3. Run your Agent
    results_log = []
    answers_payload = []

    task_id = question_data.get("task_id")
    question_text = question_data.get("question")
    if not task_id or question_text is None:
        logger.warning(f"Skipping item with missing task_id or question: {question_data}")
        return "Fetched question is missing its task_id or question text.", pd.DataFrame(results_log)
    try:
        submitted_answer = agent(question_text)
        answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
        results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
    except Exception as e:
        logger.error(f"Error running agent on task {task_id}: {e}")
        results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        logger.warning("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": api_client.username.strip(), "agent_code": api_client.agent_code, "answers": answers_payload}
    logger.info(f"Agent finished. Submitting {len(answers_payload)} answers for user '{api_client.username}'...")

    # 5. Submit
    logger.info(f"Submitting {len(answers_payload)} answers to: {api_client.submit_url}")
    return api_client.submit_answers(submission_data, results_log)

def build_gradio_interface():
    # --- Build Gradio Interface using Blocks ---
    with gr.Blocks() as demo:
        gr.Markdown("# Basic Agent Evaluation Runner")
        gr.Markdown(
            """
            **Instructions:**

            1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
            2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
            3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

            ---
            **Disclaimers:**
            Once you click the submit button, it can take quite some time (this is the time the agent needs to go through all the questions).
            This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long wait on the submit button, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
            """
        )

        gr.LoginButton()

        run_all_button = gr.Button("Run Evaluation & Submit All Answers")
        run_one_button = gr.Button("Run a Single Evaluation")

        status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
        # Removed max_rows=10 from DataFrame constructor
        results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

        run_all_button.click(
            fn=run_and_submit_all,
            outputs=[status_output, results_table]
        )
        run_one_button.click(
            fn=run_and_submit_one,
            outputs=[status_output, results_table]
        )
    return demo

if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo = build_gradio_interface()
    demo.launch(debug=True, share=False)
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
 gradio
 requests
-
-
+google-genai
+ratelimit