vaibhavi18092002 committed on
Commit
934cade
Β·
verified Β·
1 Parent(s): b86052d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +110 -92
app.py CHANGED
@@ -1,71 +1,47 @@
 
1
  import os
 
2
  import gradio as gr
3
  import requests
4
- import inspect
5
  import pandas as pd
 
 
 
6
 
7
- # Try to import Google GenAI SDK
8
- try:
9
- from google import genai
10
- except ImportError:
11
- raise ImportError("Please install the 'google-genai' package: pip install google-genai")
12
 
 
13
  # --- Constants ---
14
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
 
16
# --- Gemini Agent Definition with Fallback ---
class GeminiAgent:
    """Answer questions via the Gemini API, with a backup-key fallback.

    Reads ``GEMINI_API_KEY`` (primary) and ``GEMINI_API_KEY_2`` (backup)
    from the environment. If a request with the active key fails, one
    retry is attempted with the backup key.
    """

    def __init__(self):
        print("GeminiAgent initialized.")
        self.primary_key = os.getenv("GEMINI_API_KEY")
        self.backup_key = os.getenv("GEMINI_API_KEY_2")

        if not self.primary_key and not self.backup_key:
            raise ValueError("No Gemini API key found in environment variables.")

        self.client = None
        self.active_key = None
        # Bug fix: the original always initialized with the primary key, so
        # when only the backup key was set, _init_client formatted
        # None[-4:] and crashed with TypeError. Fall back to whichever key
        # is available.
        self._init_client(self.primary_key or self.backup_key)

    def _init_client(self, key):
        """Create a Gemini client for `key`; on failure leave self.client as None."""
        try:
            self.client = genai.Client(api_key=key)
            self.active_key = key
            print(f"✅ Initialized Gemini client with key ending in ...{key[-4:]}")
        except Exception as e:
            print(f"⚠️ Failed to initialize Gemini client with key ending in ...{key[-4:]}: {e}")
            self.client = None

    def _generate_response(self, question):
        """Ask the model for a terse answer; strip trailing punctuation."""
        if self.client is None:
            # Bug fix: calling through a None client previously raised an
            # opaque AttributeError; fail with an explicit message instead
            # (still caught by __call__'s fallback logic).
            raise RuntimeError("Gemini client is not initialized.")
        prompt = f"Answer concisely without explanation: {question}"
        response = self.client.models.generate_content(
            model="gemini-2.0-flash", contents=prompt
        )
        return response.text.strip().rstrip(".!?")

    def __call__(self, question: str) -> str:
        print(f"GeminiAgent received question (first 50 chars): {question[:50]}...")
        try:
            return self._generate_response(question)
        except Exception as e1:
            print(f"❌ Primary key failed: {e1}")
            if self.backup_key and self.backup_key != self.active_key:
                try:
                    print("🔁 Switching to backup key...")
                    self._init_client(self.backup_key)
                    return self._generate_response(question)
                except Exception as e2:
                    print(f"❌ Backup key also failed: {e2}")
                    return "ERROR: Gemini agent failed with both keys."
            else:
                return "ERROR: Gemini agent failed with primary key only."
62
-
63
- # --- Evaluation & Submission Function ---
64
- def run_and_submit_all(profile: gr.OAuthProfile | None):
65
- space_id = os.getenv("SPACE_ID")
66
 
67
  if profile:
68
- username = f"{profile.username}"
69
  print(f"User logged in: {username}")
70
  else:
71
  print("User not logged in.")
@@ -75,34 +51,38 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
75
  questions_url = f"{api_url}/questions"
76
  submit_url = f"{api_url}/submit"
77
 
 
78
  try:
79
- agent = GeminiAgent()
80
  except Exception as e:
81
  print(f"Error instantiating agent: {e}")
82
  return f"Error initializing agent: {e}", None
83
-
84
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
85
  print(agent_code)
86
 
 
87
  print(f"Fetching questions from: {questions_url}")
88
  try:
89
  response = requests.get(questions_url, timeout=15)
90
  response.raise_for_status()
91
  questions_data = response.json()
92
  if not questions_data:
93
- print("Fetched questions list is empty.")
94
- return "Fetched questions list is empty or invalid format.", None
95
  print(f"Fetched {len(questions_data)} questions.")
96
  except requests.exceptions.RequestException as e:
97
  print(f"Error fetching questions: {e}")
98
  return f"Error fetching questions: {e}", None
99
  except requests.exceptions.JSONDecodeError as e:
100
- print(f"Error decoding JSON response: {e}")
101
- return f"Error decoding server response for questions: {e}", None
 
102
  except Exception as e:
103
- print(f"Unexpected error: {e}")
104
  return f"An unexpected error occurred fetching questions: {e}", None
105
 
 
106
  results_log = []
107
  answers_payload = []
108
  print(f"Running agent on {len(questions_data)} questions...")
@@ -117,17 +97,19 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
117
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
118
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
119
  except Exception as e:
120
- print(f"Error running agent on task {task_id}: {e}")
121
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
122
 
123
  if not answers_payload:
 
124
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
125
 
126
- submission_data = {
127
- "username": username.strip(),
128
- "agent_code": agent_code,
129
- "answers": answers_payload
130
- }
 
131
  print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
132
  try:
133
  response = requests.post(submit_url, json=submission_data, timeout=60)
@@ -140,50 +122,86 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
140
  f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
141
  f"Message: {result_data.get('message', 'No message received.')}"
142
  )
143
- return final_status, pd.DataFrame(results_log)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  except requests.exceptions.RequestException as e:
145
- status_message = f"Submission Failed: {e}"
146
- return status_message, pd.DataFrame(results_log)
 
 
147
  except Exception as e:
148
- return f"Unexpected error during submission: {e}", pd.DataFrame(results_log)
 
 
 
149
 
150
# --- Gradio UI ---
with gr.Blocks() as demo:
    # Page header and usage notes.
    gr.Markdown("# Gemini Agent Evaluation Runner")
    instructions = """
    **Instructions:**
    1. Log in to Hugging Face using the button below.
    2. Click the button to run your agent and submit your answers.

    ---
    ⚠️ Evaluation may take time. Keep this tab open until completion.
    """
    gr.Markdown(instructions)

    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(
        label="Run Status / Submission Result", lines=5, interactive=False
    )
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # The OAuth profile argument is injected by Gradio; only the two
    # output components are wired here.
    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
 
 
 
168
 
169
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)

    # Surface the Space runtime/repo URLs in the startup log when the app
    # runs inside a Hugging Face Space.
    host = os.getenv("SPACE_HOST")
    repo_id = os.getenv("SPACE_ID")

    if host:
        print(f"✅ SPACE_HOST found: {host}")
        print(f"   Runtime URL: https://{host}.hf.space")
    else:
        print("ℹ️ SPACE_HOST not found (likely running locally).")

    if repo_id:
        print(f"✅ SPACE_ID found: {repo_id}")
        print(f"   Repo: https://huggingface.co/spaces/{repo_id}")
        print(f"   Code: https://huggingface.co/spaces/{repo_id}/tree/main")
    else:
        print("ℹ️ SPACE_ID not found. Cannot link to code.")

    print("-" * 70)
    print("Launching Gradio Interface for Gemini Agent Evaluation...")
    demo.launch(debug=True, share=False)
 
1
+ """ Basic Agent Evaluation Runner"""
2
  import os
3
+ import inspect
4
  import gradio as gr
5
  import requests
 
6
  import pandas as pd
7
+ from langchain_core.messages import HumanMessage
8
+ from agent import build_graph
9
+
10
 
 
 
 
 
 
11
 
12
+ # (Keep Constants as is)
13
  # --- Constants ---
14
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
 
16
+ # --- Basic Agent Definition ---
17
+ # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
 
 
 
 
 
 
18
 
 
 
 
19
 
20
class BasicAgent:
    """A langgraph agent: routes each question through a compiled graph."""

    # Marker the graph's final message is expected to start with, e.g.
    # "FINAL ANSWER: 42". NOTE(review): the original stripped a hard-coded
    # 14 characters (len("FINAL ANSWER: ")), silently corrupting any
    # answer that lacked the prefix — confirm the prefix against agent.py.
    _ANSWER_PREFIX = "FINAL ANSWER: "

    def __init__(self):
        print("BasicAgent initialized.")
        self.graph = build_graph()

    def __call__(self, question: str) -> str:
        """Run the graph on `question` and return the final answer text."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Wrap the question in a HumanMessage from langchain_core
        messages = [HumanMessage(content=question)]
        result = self.graph.invoke({"messages": messages})
        answer = result["messages"][-1].content
        # Bug fix: strip the marker only when present instead of blindly
        # slicing off the first 14 characters of every answer.
        return answer.removeprefix(self._ANSWER_PREFIX)
33
+
34
+
35
+ def run_and_submit_all( profile: gr.OAuthProfile | None):
36
+ """
37
+ Fetches all questions, runs the BasicAgent on them, submits all answers,
38
+ and displays the results.
39
+ """
40
+ # --- Determine HF Space Runtime URL and Repo URL ---
41
+ space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
 
 
 
 
42
 
43
  if profile:
44
+ username= f"{profile.username}"
45
  print(f"User logged in: {username}")
46
  else:
47
  print("User not logged in.")
 
51
  questions_url = f"{api_url}/questions"
52
  submit_url = f"{api_url}/submit"
53
 
54
+ # 1. Instantiate Agent (modify this part to create your agent)
55
  try:
56
+ agent = BasicAgent()
57
  except Exception as e:
58
  print(f"Error instantiating agent: {e}")
59
  return f"Error initializing agent: {e}", None
60
+ # In the case of an app running as a Hugging Face Space, this link points toward your codebase (useful for others, so please keep it public)
61
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
62
  print(agent_code)
63
 
64
+ # 2. Fetch Questions
65
  print(f"Fetching questions from: {questions_url}")
66
  try:
67
  response = requests.get(questions_url, timeout=15)
68
  response.raise_for_status()
69
  questions_data = response.json()
70
  if not questions_data:
71
+ print("Fetched questions list is empty.")
72
+ return "Fetched questions list is empty or invalid format.", None
73
  print(f"Fetched {len(questions_data)} questions.")
74
  except requests.exceptions.RequestException as e:
75
  print(f"Error fetching questions: {e}")
76
  return f"Error fetching questions: {e}", None
77
  except requests.exceptions.JSONDecodeError as e:
78
+ print(f"Error decoding JSON response from questions endpoint: {e}")
79
+ print(f"Response text: {response.text[:500]}")
80
+ return f"Error decoding server response for questions: {e}", None
81
  except Exception as e:
82
+ print(f"An unexpected error occurred fetching questions: {e}")
83
  return f"An unexpected error occurred fetching questions: {e}", None
84
 
85
+ # 3. Run your Agent
86
  results_log = []
87
  answers_payload = []
88
  print(f"Running agent on {len(questions_data)} questions...")
 
97
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
98
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
99
  except Exception as e:
100
+ print(f"Error running agent on task {task_id}: {e}")
101
+ results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
102
 
103
  if not answers_payload:
104
+ print("Agent did not produce any answers to submit.")
105
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
106
 
107
+ # 4. Prepare Submission
108
+ submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
109
+ status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
110
+ print(status_update)
111
+
112
+ # 5. Submit
113
  print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
114
  try:
115
  response = requests.post(submit_url, json=submission_data, timeout=60)
 
122
  f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
123
  f"Message: {result_data.get('message', 'No message received.')}"
124
  )
125
+ print("Submission successful.")
126
+ results_df = pd.DataFrame(results_log)
127
+ return final_status, results_df
128
+ except requests.exceptions.HTTPError as e:
129
+ error_detail = f"Server responded with status {e.response.status_code}."
130
+ try:
131
+ error_json = e.response.json()
132
+ error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
133
+ except requests.exceptions.JSONDecodeError:
134
+ error_detail += f" Response: {e.response.text[:500]}"
135
+ status_message = f"Submission Failed: {error_detail}"
136
+ print(status_message)
137
+ results_df = pd.DataFrame(results_log)
138
+ return status_message, results_df
139
+ except requests.exceptions.Timeout:
140
+ status_message = "Submission Failed: The request timed out."
141
+ print(status_message)
142
+ results_df = pd.DataFrame(results_log)
143
+ return status_message, results_df
144
  except requests.exceptions.RequestException as e:
145
+ status_message = f"Submission Failed: Network error - {e}"
146
+ print(status_message)
147
+ results_df = pd.DataFrame(results_log)
148
+ return status_message, results_df
149
  except Exception as e:
150
+ status_message = f"An unexpected error occurred during submission: {e}"
151
+ print(status_message)
152
+ results_df = pd.DataFrame(results_log)
153
+ return status_message, results_df
154
 
 
 
 
 
 
 
 
155
 
156
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    # Bug fix (user-facing text): the original instructions contained an
    # unbalanced quote ('"submit button,'), the typo "seperate", and
    # garbled phrasing; the copy below says the same things correctly.
    gr.Markdown(
        """
        **Instructions:**
        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc...
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
        ---
        **Disclaimers:**
        Once you click the "Run Evaluation & Submit All Answers" button, it can take quite some time (this is how long the agent needs to work through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long-running submit action, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # Gradio injects the OAuth profile automatically because
    # run_and_submit_all annotates its parameter as gr.OAuthProfile | None.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
184
 
185
if __name__ == "__main__":
    banner = " App Starting "
    print(f"\n{'-'*30}{banner}{'-'*30}")

    # Check for SPACE_HOST and SPACE_ID at startup so the log links to the
    # runtime URL and repository when running inside a Hugging Face Space.
    host = os.getenv("SPACE_HOST")
    repo_id = os.getenv("SPACE_ID")

    if host:
        print(f"✅ SPACE_HOST found: {host}")
        print(f"   Runtime URL should be: https://{host}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if repo_id:
        # SPACE_ID present — emit the repo links as well.
        print(f"✅ SPACE_ID found: {repo_id}")
        print(f"   Repo URL: https://huggingface.co/spaces/{repo_id}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{repo_id}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-" * (60 + len(banner)) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)