Gustavo Gonçalves committed on
Commit
cc884e8
·
1 Parent(s): b9e458c

Simple test

Browse files
Files changed (3) hide show
  1. agents.py +81 -0
  2. app.py +48 -26
  3. requirements.txt +3 -0
agents.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ast import main
2
+ import os
3
+ from typing import TypedDict, List, Dict, Any, Optional
4
+ from langgraph.graph import StateGraph, START, END
5
+ from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
6
+ from langchain_google_genai import ChatGoogleGenerativeAI
7
+ from langchain_core.rate_limiters import InMemoryRateLimiter
8
+
9
+
10
# System prompt for the GAIA benchmark: forces the model to end its reply with
# "FINAL ANSWER: ..." in a strict format (plain numbers, no articles/units) so
# the answer can be extracted and scored automatically.
GAIA_PROMPT = "You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string."
11
+
12
+ # Initialize our LLM
13
+
14
+
15
+ # Data
16
class GAIAAgentState(TypedDict):
    """State carried between nodes of the GAIA agent graph.

    Used as the state schema for the ``StateGraph`` built in
    ``GraphManager``; fields are populated incrementally as nodes run.
    """

    task_id: str  # GAIA task identifier — presumably from the questions endpoint; confirm
    question: str  # question text the agent must answer
    file_id: Optional[str]  # id of an attached file, if the task has one — TODO confirm semantics
    answer: Optional[str]  # final answer, None until produced
    thought: Optional[str]  # intermediate reasoning, None until recorded
    # TODO add file binary fields
25
+
26
+
27
class BasicAgent:
    """Minimal LLM agent: answers a question via Gemini using the GAIA prompt.

    Reads the API key from the ``GEMINI_API_KEY`` environment variable and
    raises ``KeyError`` at construction time if it is unset.
    """

    def __init__(self):
        # Set up the rate limiter: 0.2 requests/second == 12 requests per
        # minute, throttling calls to the Gemini API.
        self.rate_limiter = InMemoryRateLimiter(
            requests_per_second=0.2  # 12 requests per minute
        )
        self.model = ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            temperature=0,
            max_tokens=None,
            timeout=None,
            max_retries=2,
            google_api_key=os.environ["GEMINI_API_KEY"],
            rate_limiter=self.rate_limiter,
        )
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Send *question* to the model and return its reply as a string.

        The GAIA system prompt is prepended, so the reply is expected to end
        with a "FINAL ANSWER: ..." line.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        messages = [
            ("system", GAIA_PROMPT),
            ("human", question),
        ]

        # Pass the messages to the model
        ai_msg = self.model.invoke(messages)

        # Extract and return the AI's response
        print(f"Agent returning response: {ai_msg.content}")
        # content may be a str or a list of content parts; coerce to str.
        # (Replaces the original double-negated isinstance ternary.)
        content = ai_msg.content
        return content if isinstance(content, str) else str(content)
61
+
62
+
63
class GraphManager:
    """Owns and wires up the LangGraph ``StateGraph`` for the GAIA agent."""

    def __init__(self):
        # State schema is GAIAAgentState; nodes receive and return that dict.
        self.graph = StateGraph(GAIAAgentState)
        print("GraphManager initialized.")

    def read_question_and_define_gaia_state(
        self, state: GAIAAgentState
    ) -> GAIAAgentState:
        """Entry node: read the question and populate the GAIA state.

        Currently a pass-through stub. Returning the state unchanged (instead
        of the previous implicit ``None``) honors the return annotation and
        keeps the node a valid graph update.
        """
        # TODO: Implement the logic to read the question and define the GAIA state
        return state

    def build_graph(self) -> StateGraph:
        """Add the nodes and edges and return the graph (uncompiled).

        NOTE(review): no edge to END is added yet and ``.compile()`` is not
        called — callers must finish wiring before running the graph; confirm
        this is intentional while the agent is under construction.
        """
        # Add nodes
        self.graph.add_node(
            "read_question_and_define_gaia_state",
            self.read_question_and_define_gaia_state,
        )
        # Add edges
        self.graph.add_edge(START, "read_question_and_define_gaia_state")
        return self.graph
app.py CHANGED
@@ -13,16 +13,16 @@ GAIA_PROMPT = "You are a general AI assistant. I will ask you a question. Report
13
 
14
  # --- Basic Agent Definition ---
15
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
16
- def run_and_submit_all( profile: gr.OAuthProfile | None):
17
  """
18
  Fetches all questions, runs the BasicAgent on them, submits all answers,
19
  and displays the results.
20
  """
21
  # --- Determine HF Space Runtime URL and Repo URL ---
22
- space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
23
 
24
  if profile:
25
- username= f"{profile.username}"
26
  print(f"User logged in: {username}")
27
  else:
28
  print("User not logged in.")
@@ -49,16 +49,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
49
  response.raise_for_status()
50
  questions_data = response.json()
51
  if not questions_data:
52
- print("Fetched questions list is empty.")
53
- return "Fetched questions list is empty or invalid format.", None
54
  print(f"Fetched {len(questions_data)} questions.")
55
  except requests.exceptions.RequestException as e:
56
  print(f"Error fetching questions: {e}")
57
  return f"Error fetching questions: {e}", None
58
  except requests.exceptions.JSONDecodeError as e:
59
- print(f"Error decoding JSON response from questions endpoint: {e}")
60
- print(f"Response text: {response.text[:500]}")
61
- return f"Error decoding server response for questions: {e}", None
62
  except Exception as e:
63
  print(f"An unexpected error occurred fetching questions: {e}")
64
  return f"An unexpected error occurred fetching questions: {e}", None
@@ -75,18 +75,37 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
75
  continue
76
  try:
77
  submitted_answer = agent(question_text)
78
- answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
79
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
 
 
 
 
 
 
 
 
80
  except Exception as e:
81
- print(f"Error running agent on task {task_id}: {e}")
82
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
 
 
 
 
 
 
 
83
 
84
  if not answers_payload:
85
  print("Agent did not produce any answers to submit.")
86
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
87
 
88
- # 4. Prepare Submission
89
- submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
 
 
 
 
90
  status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
91
  print(status_update)
92
 
@@ -156,20 +175,19 @@ with gr.Blocks() as demo:
156
 
157
  run_button = gr.Button("Run Evaluation & Submit All Answers")
158
 
159
- status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
 
 
160
  # Removed max_rows=10 from DataFrame constructor
161
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
162
 
163
- run_button.click(
164
- fn=run_and_submit_all,
165
- outputs=[status_output, results_table]
166
- )
167
 
168
  if __name__ == "__main__":
169
- print("\n" + "-"*30 + " App Starting " + "-"*30)
170
  # Check for SPACE_HOST and SPACE_ID at startup for information
171
  space_host_startup = os.getenv("SPACE_HOST")
172
- space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
173
 
174
  if space_host_startup:
175
  print(f"✅ SPACE_HOST found: {space_host_startup}")
@@ -177,14 +195,18 @@ if __name__ == "__main__":
177
  else:
178
  print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
179
 
180
- if space_id_startup: # Print repo URLs if SPACE_ID is found
181
  print(f"✅ SPACE_ID found: {space_id_startup}")
182
  print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
183
- print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
 
 
184
  else:
185
- print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
 
 
186
 
187
- print("-"*(60 + len(" App Starting ")) + "\n")
188
 
189
  print("Launching Gradio Interface for Basic Agent Evaluation...")
190
- demo.launch(debug=True, share=False)
 
13
 
14
  # --- Basic Agent Definition ---
15
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
16
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
17
  """
18
  Fetches all questions, runs the BasicAgent on them, submits all answers,
19
  and displays the results.
20
  """
21
  # --- Determine HF Space Runtime URL and Repo URL ---
22
+ space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
23
 
24
  if profile:
25
+ username = f"{profile.username}"
26
  print(f"User logged in: {username}")
27
  else:
28
  print("User not logged in.")
 
49
  response.raise_for_status()
50
  questions_data = response.json()
51
  if not questions_data:
52
+ print("Fetched questions list is empty.")
53
+ return "Fetched questions list is empty or invalid format.", None
54
  print(f"Fetched {len(questions_data)} questions.")
55
  except requests.exceptions.RequestException as e:
56
  print(f"Error fetching questions: {e}")
57
  return f"Error fetching questions: {e}", None
58
  except requests.exceptions.JSONDecodeError as e:
59
+ print(f"Error decoding JSON response from questions endpoint: {e}")
60
+ print(f"Response text: {response.text[:500]}")
61
+ return f"Error decoding server response for questions: {e}", None
62
  except Exception as e:
63
  print(f"An unexpected error occurred fetching questions: {e}")
64
  return f"An unexpected error occurred fetching questions: {e}", None
 
75
  continue
76
  try:
77
  submitted_answer = agent(question_text)
78
+ answers_payload.append(
79
+ {"task_id": task_id, "submitted_answer": submitted_answer}
80
+ )
81
+ results_log.append(
82
+ {
83
+ "Task ID": task_id,
84
+ "Question": question_text,
85
+ "Submitted Answer": submitted_answer,
86
+ }
87
+ )
88
  except Exception as e:
89
+ print(f"Error running agent on task {task_id}: {e}")
90
+ results_log.append(
91
+ {
92
+ "Task ID": task_id,
93
+ "Question": question_text,
94
+ "Submitted Answer": f"AGENT ERROR: {e}",
95
+ }
96
+ )
97
+ break
98
 
99
  if not answers_payload:
100
  print("Agent did not produce any answers to submit.")
101
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
102
 
103
+ # 4. Prepare Submission
104
+ submission_data = {
105
+ "username": username.strip(),
106
+ "agent_code": agent_code,
107
+ "answers": answers_payload,
108
+ }
109
  status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
110
  print(status_update)
111
 
 
175
 
176
  run_button = gr.Button("Run Evaluation & Submit All Answers")
177
 
178
+ status_output = gr.Textbox(
179
+ label="Run Status / Submission Result", lines=5, interactive=False
180
+ )
181
  # Removed max_rows=10 from DataFrame constructor
182
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
183
 
184
+ run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
 
 
 
185
 
186
  if __name__ == "__main__":
187
+ print("\n" + "-" * 30 + " App Starting " + "-" * 30)
188
  # Check for SPACE_HOST and SPACE_ID at startup for information
189
  space_host_startup = os.getenv("SPACE_HOST")
190
+ space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
191
 
192
  if space_host_startup:
193
  print(f"✅ SPACE_HOST found: {space_host_startup}")
 
195
  else:
196
  print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
197
 
198
+ if space_id_startup: # Print repo URLs if SPACE_ID is found
199
  print(f"✅ SPACE_ID found: {space_id_startup}")
200
  print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
201
+ print(
202
+ f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main"
203
+ )
204
  else:
205
+ print(
206
+ "ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined."
207
+ )
208
 
209
+ print("-" * (60 + len(" App Starting ")) + "\n")
210
 
211
  print("Launching Gradio Interface for Basic Agent Evaluation...")
212
+ demo.launch(debug=True, share=False)
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ requests
3
+ langchain-google-genai