jarguello76 committed on
Commit
9b5e771
·
verified ·
1 Parent(s): 5ca1799

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +114 -281
app.py CHANGED
@@ -1,310 +1,116 @@
 
1
  import os
2
- import re
3
  import gradio as gr
4
  import requests
5
  import pandas as pd
6
- from duckduckgo_search import DDGS
7
- from langchain_core.tools import tool
8
- from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
9
- from langgraph.prebuilt import create_react_agent
10
- from langgraph.graph import StateGraph
11
 
 
 
 
12
  # --- Constants ---
13
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
14
 
15
- # --- Tools ---
16
- @tool
17
- def search_web(query: str) -> str:
18
- """Search the web and return a relevant text snippet."""
19
- try:
20
- with DDGS() as ddgs:
21
- results = list(ddgs.text(query, max_results=3))
22
- if not results or not results[0].get("body"):
23
- return "No result found"
24
- return results[0]["body"]
25
- except Exception as e:
26
- return f"Web search failed: {e}"
27
 
28
- @tool
29
- def run_code_snippet(code: str) -> str:
30
- """Safely execute a Python code snippet and return its result."""
31
- try:
32
- local_env = {}
33
- exec(code, {}, local_env)
34
- result = [v for v in local_env.values() if isinstance(v, (int, float, str))]
35
- return str(result[0]) if result else "0"
36
- except Exception as e:
37
- return f"Error in code: {str(e)}"
38
 
39
- # --- GAIA Agent via LangGraph ---
40
- class LangGraphGAIAAgent:
41
  def __init__(self):
42
- print("[LangGraph Agent] Initializing Hugging Face LLM...")
43
-
44
- # Use a Hugging Face model that supports tool calling
45
- hf_token = os.getenv("agents_token")
46
- if not hf_token:
47
- raise ValueError("agents_token is required. Please set it in your Space secrets.")
48
-
49
- try:
50
- # Try models that are more likely to work with HuggingFaceEndpoint
51
- model_options = [
52
-
53
- "mistralai/Mistral-7B-Instruct-v0.3",
54
- "meta-llama/Llama-3.1-8B-Instruct",
55
- "openai-community/gpt2",
56
- ]
57
-
58
- llm = None
59
- for model_name in model_options:
60
- try:
61
- print(f"Trying model: {model_name}")
62
- endpoint = HuggingFaceEndpoint(
63
- repo_id=model_name,
64
- temperature=0.1,
65
- huggingfacehub_api_token=hf_token
66
- )
67
- # Test the endpoint with a simple call
68
- test_response = endpoint.invoke("Hello")
69
- print(f"Test successful for {model_name}")
70
-
71
- # Wrap with ChatHuggingFace for better compatibility
72
- llm = ChatHuggingFace(llm=endpoint)
73
- print(f"Successfully initialized: {model_name}")
74
- break
75
- except Exception as e:
76
- print(f"Failed to initialize {model_name}: {e}")
77
- continue
78
-
79
- if llm is None:
80
- raise ValueError("Failed to initialize any Hugging Face model")
81
-
82
- except Exception as e:
83
- print(f"Error with HF models: {e}")
84
- raise
85
-
86
- print("[LangGraph Agent] Creating ReAct loop...")
87
- try:
88
- self.graph = create_react_agent(llm, [search_web, run_code_snippet])
89
- print("Successfully created ReAct agent with tools")
90
- except Exception as e:
91
- print(f"Error creating ReAct agent with tools: {e}")
92
- # Always use fallback since HF models often have issues with ReAct
93
- print("Using fallback agent with manual tool integration...")
94
- self._create_fallback_agent(llm, [search_web, run_code_snippet])
95
-
96
- def _create_fallback_agent(self, llm, tools):
97
- """Create a robust fallback agent that manually handles tools"""
98
- from langgraph.graph import StateGraph, END
99
- from typing import Dict, Any
100
-
101
- def agent_node(state: Dict[str, Any]) -> Dict[str, Any]:
102
- messages = state.get("messages", [])
103
- if not messages:
104
- return {"messages": [{"role": "assistant", "content": "No input provided"}]}
105
-
106
- user_message = messages[-1].get("content", "")
107
- print(f"Processing: {user_message[:100]}...")
108
-
109
- try:
110
- # Analyze the question and decide which tool to use
111
- response = self._process_question_with_tools(user_message)
112
- return {"messages": [{"role": "assistant", "content": response}]}
113
- except Exception as e:
114
- print(f"Error in agent node: {e}")
115
- return {"messages": [{"role": "assistant", "content": f"Error processing question: {str(e)}"}]}
116
-
117
- # Create a simple state graph
118
- workflow = StateGraph(dict)
119
- workflow.add_node("agent", agent_node)
120
- workflow.set_entry_point("agent")
121
- workflow.add_edge("agent", END)
122
- self.graph = workflow.compile()
123
-
124
- def _process_question_with_tools(self, question: str) -> str:
125
- """Process question using appropriate tools"""
126
- question_lower = question.lower()
127
-
128
- # Check if we need web search
129
- search_keywords = ["wikipedia", "who", "what", "when", "where", "how many", "albums", "species", "articles"]
130
- needs_search = any(keyword in question_lower for keyword in search_keywords)
131
-
132
- # Check if we need code execution
133
- code_keywords = ["calculate", "compute", "count", "number", "highest", "lowest"]
134
- needs_code = any(keyword in question_lower for keyword in code_keywords)
135
-
136
- result_parts = []
137
-
138
- if needs_search:
139
- try:
140
- # Extract key terms for search
141
- search_query = self._extract_search_terms(question)
142
- print(f"Searching for: {search_query}")
143
- search_result = search_web.invoke({"query": search_query})
144
- result_parts.append(f"Search result: {search_result}")
145
- except Exception as e:
146
- result_parts.append(f"Search failed: {e}")
147
-
148
- if needs_code and ("count" in question_lower or "number" in question_lower):
149
- try:
150
- # Try to generate relevant code
151
- code = self._generate_counting_code(question)
152
- if code:
153
- print(f"Executing code: {code}")
154
- code_result = run_code_snippet.invoke({"code": code})
155
- result_parts.append(f"Calculation: {code_result}")
156
- except Exception as e:
157
- result_parts.append(f"Code execution failed: {e}")
158
-
159
- # Combine results and generate final answer
160
- if result_parts:
161
- combined_info = " | ".join(result_parts)
162
- try:
163
- # Use the LLM to synthesize the final answer
164
- prompt = f"Based on this information: {combined_info}\n\nAnswer the question: {question}\n\nProvide a direct, concise answer:"
165
- llm_response = llm.invoke(prompt)
166
- if hasattr(llm_response, 'content'):
167
- return llm_response.content
168
- else:
169
- return str(llm_response)
170
- except Exception as e:
171
- return combined_info # Return raw info if LLM fails
172
- else:
173
- # Direct LLM response for simple questions
174
- try:
175
- response = llm.invoke(f"Answer this question concisely: {question}")
176
- if hasattr(response, 'content'):
177
- return response.content
178
- else:
179
- return str(response)
180
- except Exception as e:
181
- return f"Unable to process question: {str(e)}"
182
-
183
- def _extract_search_terms(self, question: str) -> str:
184
- """Extract relevant search terms from the question"""
185
- # Remove common question words and extract key terms
186
- import re
187
-
188
- # Handle reverse text (like the third question)
189
- if question.count(' ') > 3 and question.endswith('fI'):
190
- # This might be reversed text
191
- reversed_q = question[::-1]
192
- if "If you understand" in reversed_q:
193
- return "word opposite left"
194
-
195
- # Extract key entities and terms
196
- key_terms = []
197
-
198
- # Look for quoted terms, names, dates, numbers
199
- quoted_terms = re.findall(r'"([^"]*)"', question)
200
- key_terms.extend(quoted_terms)
201
-
202
- # Look for proper nouns (capitalized words)
203
- proper_nouns = re.findall(r'\b[A-Z][a-z]+(?:\s+[A-Z][a-z]+)*\b', question)
204
- key_terms.extend(proper_nouns)
205
-
206
- # Look for years/dates
207
- years = re.findall(r'\b(19|20)\d{2}\b', question)
208
- key_terms.extend(years)
209
-
210
- # Look for specific terms
211
- specific_terms = ["albums", "species", "dinosaur", "wikipedia", "featured article"]
212
- for term in specific_terms:
213
- if term in question.lower():
214
- key_terms.append(term)
215
-
216
- return " ".join(key_terms[:5]) # Limit to 5 terms
217
-
218
- def _generate_counting_code(self, question: str) -> str:
219
- """Generate simple counting code based on the question"""
220
- if "between" in question.lower() and any(year in question for year in ["2000", "2009"]):
221
- return """
222
- # Count years between 2000 and 2009 (inclusive)
223
- start_year = 2000
224
- end_year = 2009
225
- years_in_range = list(range(start_year, end_year + 1))
226
- count = len(years_in_range)
227
- print(f"Years from {start_year} to {end_year}: {count} years")
228
- count
229
- """
230
- return None
231
 
232
  def __call__(self, question: str) -> str:
233
- try:
234
- print(f"[LangGraph Agent] Running on: {question}")
235
- result = self.graph.invoke({"messages": [{"role": "user", "content": question}]})
236
-
237
- # Handle different response formats
238
- if isinstance(result, dict):
239
- if "messages" in result and result["messages"]:
240
- output = result["messages"][-1].get("content", "")
241
- else:
242
- output = result.get("output", str(result))
243
- else:
244
- output = str(result)
245
-
246
- if not output:
247
- raise ValueError("Model produced no output")
248
- return f"FINAL ANSWER: {self.clean(output)}"
249
- except Exception as e:
250
- print(f"[LangGraph Agent] Error: {e}")
251
- return f"FINAL ANSWER: Error: {e}"
252
 
253
- def clean(self, text: str) -> str:
254
- return re.sub(r"[\$,%]", "", text).strip()
255
-
256
- # --- Evaluation & Submission Pipeline ---
257
- def run_and_submit_all(profile: gr.OAuthProfile | None):
258
- space_id = os.getenv("SPACE_ID")
259
  if profile:
260
- username = f"{profile.username}"
261
  print(f"User logged in: {username}")
262
  else:
 
263
  return "Please Login to Hugging Face with the button.", None
264
 
265
  api_url = DEFAULT_API_URL
266
  questions_url = f"{api_url}/questions"
267
  submit_url = f"{api_url}/submit"
268
 
 
269
  try:
270
- agent = LangGraphGAIAAgent()
271
  except Exception as e:
 
272
  return f"Error initializing agent: {e}", None
273
-
274
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
275
  print(agent_code)
276
 
 
 
277
  try:
278
  response = requests.get(questions_url, timeout=15)
279
  response.raise_for_status()
280
  questions_data = response.json()
281
  if not questions_data:
282
- return "Fetched questions list is empty or invalid format.", None
 
283
  print(f"Fetched {len(questions_data)} questions.")
284
- except Exception as e:
 
285
  return f"Error fetching questions: {e}", None
 
 
 
 
 
 
 
286
 
 
287
  results_log = []
288
  answers_payload = []
289
-
290
  for item in questions_data:
291
  task_id = item.get("task_id")
292
  question_text = item.get("question")
293
  if not task_id or question_text is None:
 
294
  continue
295
-
296
  try:
297
  submitted_answer = agent(question_text)
298
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
299
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
300
  except Exception as e:
301
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
 
302
 
303
  if not answers_payload:
 
304
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
305
 
 
306
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
 
 
307
 
 
 
308
  try:
309
  response = requests.post(submit_url, json=submission_data, timeout=60)
310
  response.raise_for_status()
@@ -316,33 +122,59 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
316
  f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
317
  f"Message: {result_data.get('message', 'No message received.')}"
318
  )
319
- return final_status, pd.DataFrame(results_log)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
320
  except Exception as e:
321
- return f"Submission Failed: {e}", pd.DataFrame(results_log)
 
 
 
322
 
323
- # --- Gradio UI ---
324
- with gr.Blocks() as demo:
325
- gr.Markdown("# GAIA Agent Evaluation Runner (LangGraph Edition)")
326
- gr.Markdown("""
327
- **Instructions:**
328
- 1. Set your Hugging Face token in the Space Settings → Secrets as `HUGGINGFACEHUB_API_TOKEN`.
329
- 2. Log in to Hugging Face below to associate your username.
330
- 3. Run the agent to fetch, answer, and submit GAIA benchmark questions.
331
 
332
- **Note**: The agent will try multiple HF models and fall back to manual tool usage if needed.
333
- """)
 
 
 
 
 
 
 
 
 
 
 
 
 
334
 
335
  gr.LoginButton()
336
 
337
- gr.Markdown("## 🔍 Try a Question Preview")
338
- preview_input = gr.Textbox(label="Your GAIA-style question")
339
- preview_output = gr.Textbox(label="Agent's Response", lines=2)
340
- preview_button = gr.Button("Preview Answer")
341
-
342
- preview_button.click(lambda q: LangGraphGAIAAgent()(q), inputs=preview_input, outputs=preview_output)
343
-
344
  run_button = gr.Button("Run Evaluation & Submit All Answers")
 
345
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
 
346
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
347
 
348
  run_button.click(
@@ -350,25 +182,26 @@ with gr.Blocks() as demo:
350
  outputs=[status_output, results_table]
351
  )
352
 
353
- # --- Main Entry ---
354
  if __name__ == "__main__":
355
  print("\n" + "-"*30 + " App Starting " + "-"*30)
 
356
  space_host_startup = os.getenv("SPACE_HOST")
357
- space_id_startup = os.getenv("SPACE_ID")
358
 
359
  if space_host_startup:
360
  print(f"✅ SPACE_HOST found: {space_host_startup}")
361
- print(f" Runtime URL: https://{space_host_startup}.hf.space")
362
  else:
363
- print("ℹ️ SPACE_HOST not set.")
364
 
365
- if space_id_startup:
366
  print(f"✅ SPACE_ID found: {space_id_startup}")
367
- print(f" Repo: https://huggingface.co/spaces/{space_id_startup}")
368
- print(f" Code: https://huggingface.co/spaces/{space_id_startup}/tree/main")
369
  else:
370
- print("ℹ️ SPACE_ID not set.")
371
 
372
  print("-"*(60 + len(" App Starting ")) + "\n")
373
- print("Launching Gradio Interface for GAIA Agent Evaluation...")
374
- demo.launch(debug=True, share=False)
 
 
1
+ """ Basic Agent Evaluation Runner"""
2
  import os
3
+ import inspect
4
  import gradio as gr
5
  import requests
6
  import pandas as pd
7
+ from langchain_core.messages import HumanMessage
8
+ from tools import build_graph
 
 
 
9
 
10
+
11
+
12
# (Keep Constants as is)
# --- Constants ---
# Base URL of the scoring service for the HF Agents course (unit 4).
# Questions are fetched from f"{DEFAULT_API_URL}/questions" and answers are
# posted to f"{DEFAULT_API_URL}/submit" (see run_and_submit_all below).
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
 
16
+ # --- Basic Agent Definition ---
17
+ # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
 
 
 
 
 
 
 
 
 
18
 
 
 
 
 
 
 
 
 
 
 
19
 
20
class BasicAgent:
    """A langgraph agent.

    Thin wrapper around the compiled graph returned by ``tools.build_graph``:
    each call feeds one question through the graph and returns the final
    message's content with the answer prefix stripped.
    """

    # Prefix the final model message is expected to start with.
    # NOTE(review): assumes the prompt inside tools.build_graph mandates
    # exactly this prefix — confirm against tools.py.
    _ANSWER_PREFIX = "FINAL ANSWER: "

    def __init__(self):
        print("BasicAgent initialized.")
        # Compiled LangGraph; invoked once per question in __call__.
        self.graph = build_graph()

    def __call__(self, question: str) -> str:
        """Run the graph on *question* and return the cleaned answer text."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Wrap the question in a HumanMessage from langchain_core
        messages = [HumanMessage(content=question)]
        result = self.graph.invoke({"messages": messages})
        answer = result['messages'][-1].content
        # Bug fix: the original returned answer[14:], unconditionally cutting
        # 14 characters (the length of "FINAL ANSWER: ") off the front.  When
        # the model did NOT emit that prefix, the start of the real answer was
        # silently destroyed.  Strip the prefix only when it is present.
        return answer.removeprefix(self._ANSWER_PREFIX)
33
+
34
+
35
+ def run_and_submit_all( profile: gr.OAuthProfile | None):
36
+ """
37
+ Fetches all questions, runs the BasicAgent on them, submits all answers,
38
+ and displays the results.
39
+ """
40
+ # --- Determine HF Space Runtime URL and Repo URL ---
41
+ space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
 
 
 
 
42
 
 
 
 
 
 
 
43
  if profile:
44
+ username= f"{profile.username}"
45
  print(f"User logged in: {username}")
46
  else:
47
+ print("User not logged in.")
48
  return "Please Login to Hugging Face with the button.", None
49
 
50
  api_url = DEFAULT_API_URL
51
  questions_url = f"{api_url}/questions"
52
  submit_url = f"{api_url}/submit"
53
 
54
+ # 1. Instantiate Agent ( modify this part to create your agent)
55
  try:
56
+ agent = BasicAgent()
57
  except Exception as e:
58
+ print(f"Error instantiating agent: {e}")
59
  return f"Error initializing agent: {e}", None
60
+ # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
61
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
62
  print(agent_code)
63
 
64
+ # 2. Fetch Questions
65
+ print(f"Fetching questions from: {questions_url}")
66
  try:
67
  response = requests.get(questions_url, timeout=15)
68
  response.raise_for_status()
69
  questions_data = response.json()
70
  if not questions_data:
71
+ print("Fetched questions list is empty.")
72
+ return "Fetched questions list is empty or invalid format.", None
73
  print(f"Fetched {len(questions_data)} questions.")
74
+ except requests.exceptions.RequestException as e:
75
+ print(f"Error fetching questions: {e}")
76
  return f"Error fetching questions: {e}", None
77
+ except requests.exceptions.JSONDecodeError as e:
78
+ print(f"Error decoding JSON response from questions endpoint: {e}")
79
+ print(f"Response text: {response.text[:500]}")
80
+ return f"Error decoding server response for questions: {e}", None
81
+ except Exception as e:
82
+ print(f"An unexpected error occurred fetching questions: {e}")
83
+ return f"An unexpected error occurred fetching questions: {e}", None
84
 
85
+ # 3. Run your Agent
86
  results_log = []
87
  answers_payload = []
88
+ print(f"Running agent on {len(questions_data)} questions...")
89
  for item in questions_data:
90
  task_id = item.get("task_id")
91
  question_text = item.get("question")
92
  if not task_id or question_text is None:
93
+ print(f"Skipping item with missing task_id or question: {item}")
94
  continue
 
95
  try:
96
  submitted_answer = agent(question_text)
97
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
98
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
99
  except Exception as e:
100
+ print(f"Error running agent on task {task_id}: {e}")
101
+ results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
102
 
103
  if not answers_payload:
104
+ print("Agent did not produce any answers to submit.")
105
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
106
 
107
+ # 4. Prepare Submission
108
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
109
+ status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
110
+ print(status_update)
111
 
112
+ # 5. Submit
113
+ print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
114
  try:
115
  response = requests.post(submit_url, json=submission_data, timeout=60)
116
  response.raise_for_status()
 
122
  f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
123
  f"Message: {result_data.get('message', 'No message received.')}"
124
  )
125
+ print("Submission successful.")
126
+ results_df = pd.DataFrame(results_log)
127
+ return final_status, results_df
128
+ except requests.exceptions.HTTPError as e:
129
+ error_detail = f"Server responded with status {e.response.status_code}."
130
+ try:
131
+ error_json = e.response.json()
132
+ error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
133
+ except requests.exceptions.JSONDecodeError:
134
+ error_detail += f" Response: {e.response.text[:500]}"
135
+ status_message = f"Submission Failed: {error_detail}"
136
+ print(status_message)
137
+ results_df = pd.DataFrame(results_log)
138
+ return status_message, results_df
139
+ except requests.exceptions.Timeout:
140
+ status_message = "Submission Failed: The request timed out."
141
+ print(status_message)
142
+ results_df = pd.DataFrame(results_log)
143
+ return status_message, results_df
144
+ except requests.exceptions.RequestException as e:
145
+ status_message = f"Submission Failed: Network error - {e}"
146
+ print(status_message)
147
+ results_df = pd.DataFrame(results_log)
148
+ return status_message, results_df
149
  except Exception as e:
150
+ status_message = f"An unexpected error occurred during submission: {e}"
151
+ print(status_message)
152
+ results_df = pd.DataFrame(results_log)
153
+ return status_message, results_df
154
 
 
 
 
 
 
 
 
 
155
 
156
+ # --- Build Gradio Interface using Blocks ---
157
+ with gr.Blocks() as demo:
158
+ gr.Markdown("# Basic Agent Evaluation Runner")
159
+ gr.Markdown(
160
+ """
161
+ **Instructions:**
162
+ 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
163
+ 2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
164
+ 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
165
+ ---
166
+ **Disclaimers:**
167
+ Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
168
+ This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
169
+ """
170
+ )
171
 
172
  gr.LoginButton()
173
 
 
 
 
 
 
 
 
174
  run_button = gr.Button("Run Evaluation & Submit All Answers")
175
+
176
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
177
+ # Removed max_rows=10 from DataFrame constructor
178
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
179
 
180
  run_button.click(
 
182
  outputs=[status_output, results_table]
183
  )
184
 
 
185
# --- Main Entry ---
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information only;
    # per the messages below, both are expected to be absent when running
    # locally and present on a deployed Space.
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup: # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    # 60 = the two 30-dash runs of the banner above, so the closing rule
    # matches the opening banner's width.
    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)