Update ui_components.py
Browse files · ui_components.py (+336, -309)

ui_components.py (CHANGED)
Removed (old version):

@@ -1,32 +1,62 @@
  """
- UI Components for Universal MCP Client -
  """
  import gradio as gr
  from gradio import ChatMessage
  from typing import Tuple, List, Dict, Any

- from config import AppConfig, CUSTOM_CSS
  from chat_handler import ChatHandler
  from server_manager import ServerManager
  from mcp_client import UniversalMCPClient

  class UIComponents:
-     """Manages Gradio UI components

      def __init__(self, mcp_client: UniversalMCPClient):
          self.mcp_client = mcp_client
          self.chat_handler = ChatHandler(mcp_client)
          self.server_manager = ServerManager(mcp_client)

-
-
-
-

      def create_interface(self) -> gr.Blocks:
-         """Create the main Gradio interface with
          with gr.Blocks(
-             title="Universal MCP Client",
              theme=getattr(gr.themes, AppConfig.GRADIO_THEME.title())(),
              fill_height=True,
              css=CUSTOM_CSS
@@ -39,292 +69,246 @@ class UIComponents:
              chatbot = self._create_main_chat_area()

              # Set up event handlers
-             self._setup_event_handlers(chatbot)

          return demo

      def _create_sidebar(self):
-         """Create the sidebar with
          with gr.Sidebar(elem_id="main-sidebar"):
-             gr.Markdown("#

-             #
-             self.

              # Collapsible information section
              with gr.Accordion("📚 Guide & Info", open=False):
                  gr.Markdown("""
                  ## 🎯 How To Use
-
-
-
-

-                 ## 💭
-                 - **
-                 - **
-                 - **
-                 - **Media
-                 - **Real-time Status**: Watch tools execute with pending/done status indicators
                  """)
-
-             # Current backend status
-             self.backend_status = gr.Markdown("**Current Backend**: Not configured")
-
-             # Server management
-             self._create_server_management_section()

-     def
-         """Create
-         gr.
-
-
-
-
-
-
-
-
-
-
-
-         # Anthropic configuration section
-         with gr.Column(visible=True, elem_classes="provider-selection anthropic-config") as self.anthropic_config:
-             gr.Markdown("### 🔹 Anthropic Claude Configuration")
-
-             # Check if API key is available
-             if AppConfig.ANTHROPIC_API_KEY:
-                 self.anthropic_status = gr.Markdown("✅ **Status**: API key configured")
-             else:
-                 self.anthropic_status = gr.Markdown("""
-                 ❌ **Status**: API key not found
-
-                 **Setup Instructions**:
-                 1. Go to Space Settings → Secrets
-                 2. Add `ANTHROPIC_API_KEY` with your Anthropic API key
-                 3. Restart the space
-                 """)
-
-         # HuggingFace Inference Provider configuration section
-         with gr.Column(visible=False, elem_classes="provider-selection hf-config") as self.hf_config:
-             gr.Markdown("### 🔸 HuggingFace Inference Provider Configuration")
-
-             # Check if HF token is available
-             if AppConfig.HF_TOKEN:
-                 self.hf_status = gr.Markdown("✅ **Status**: HF token configured")
-             else:
-                 self.hf_status = gr.Markdown("""
-                 ❌ **Status**: HF token not found
-
-                 **Setup Instructions**:
-                 1. Go to Space Settings → Secrets
-                 2. Add `HF_TOKEN` with your HuggingFace token
-                 3. Restart the space
-                 """)

-             # Provider dropdown
              provider_choices = list(AppConfig.INFERENCE_PROVIDERS.keys())
              self.provider_dropdown = gr.Dropdown(
                  choices=provider_choices,
-                 label="
-                 value=
                  info="Choose your preferred inference provider"
              )

-             # Model dropdown (
              self.model_dropdown = gr.Dropdown(
                  choices=[],
-                 label="🤖
                  value=None,
-                 info="
              )

-             #
-             self.
-                 "Configure Inference Provider",
-                 variant="primary",
-                 size="sm"
-             )
-
-             self.provider_config_status = gr.Textbox(
-                 label="Configuration Status",
-                 interactive=False,
-                 visible=False
-             )

      def _create_server_management_section(self):
-         """Create the server management section
-         gr.
-
-
-
-
-
-
-
-
-
-
-

-         #
          from mcp_spaces_finder import _finder
          spaces = _finder.get_mcp_spaces()
-         spaces_count = len(spaces)
-
-         # Create MCP spaces dropdown
          self.mcp_dropdown = gr.Dropdown(
              choices=spaces,
-             label=f"
              value=None,
-             info="Choose
-             allow_custom_value=True
          )

-         # Server title input
          self.server_name = gr.Textbox(
-             label="
-             placeholder="
          )

-
-         self.
-
-
-
-

-         self.
-         self.status_count = gr.Markdown("**Total MCP Servers**: 0")
-         self.status_output = gr.HTML()

      def _create_main_chat_area(self) -> gr.Chatbot:
-         """Create the main chat area
          with gr.Column(elem_classes="main-content"):
-             # Chatbot takes most of the space - configured for ChatMessage
              chatbot = gr.Chatbot(
-                 label="Universal MCP-Powered
                  show_label=False,
-                 type="messages",
-                 scale=1,
                  show_copy_button=True,
                  avatar_images=None,
-                 # Initial welcome message with proper ChatMessage format
                  value=[
                      ChatMessage(
                          role="assistant",
-                         content="Welcome! I'm your MCP-powered AI assistant
                      )
                  ]
              )
-
-
              with gr.Column(scale=0, elem_classes="input-area"):
                  self.chat_input = gr.MultimodalTextbox(
                      interactive=True,
                      file_count="multiple",
-                     placeholder="Enter message or upload files
                      show_label=False,
                      sources=["upload", "microphone"],
-                     file_types=None
                  )

          return chatbot

-     def _setup_event_handlers(self, chatbot: gr.Chatbot):
-         """Set up all event handlers

-         #
-         def
-
-
-
-
-
-
-
-
-
-
-
-
              else:
-
-                 status = "**Current Backend**: HF Inference Providers (⚙️ Configure provider)"
-             else:
-                 status = "**Current Backend**: HF Inference Providers (❌ HF token needed)"

              return (
-                 gr.
-
-                 status  # backend_status
              )

-
-
-         if not
-             return

-
-             return gr.update(choices=models, value=models[0] if models else None)
-
-         def handle_provider_configuration(provider, model):
-             """Handle inference provider configuration"""
-             if not provider or not model:
-                 return "❌ Please select both provider and model", gr.update(visible=True)
-
-             if not AppConfig.HF_TOKEN:
-                 return "❌ HF_TOKEN not configured. Please add it to space secrets.", gr.update(visible=True)

-             #
-

-             if
-
-                 self.current_model = model
-
-                 # Update chat handler
-                 self.chat_handler.mcp_client = self.mcp_client
-
-                 status_msg = f"✅ Configured {provider} with {model}"
-
-                 # Update backend status
-                 backend_status = f"**Current Backend**: {provider}/{model} ✅"
-
-                 return status_msg, gr.update(visible=True)
              else:
-                 return "❌

-         # Chat
          def submit_message(message, history):
-             """Handle message submission with ChatMessage support"""
-             # Check if backend is properly configured
-             if self.current_backend == "anthropic" and not AppConfig.ANTHROPIC_API_KEY:
-                 error_msg = "❌ Please configure Anthropic API key in space settings first."
-                 history.append(ChatMessage(role="assistant", content=error_msg))
-                 return history, gr.MultimodalTextbox(value=None, interactive=False)
-
-             if self.current_backend == "hf_inference" and (not self.current_provider or not self.current_model):
-                 error_msg = "❌ Please configure HuggingFace inference provider first."
-                 history.append(ChatMessage(role="assistant", content=error_msg))
-                 return history, gr.MultimodalTextbox(value=None, interactive=False)
-
              if message and (message.get("text", "").strip() or message.get("files", [])):
-                 # Convert existing history to ChatMessage objects if needed
                  converted_history = []
                  for msg in history:
                      if isinstance(msg, dict):
-                         # Convert dict to ChatMessage
                          converted_history.append(ChatMessage(
                              role=msg.get('role', 'assistant'),
                              content=msg.get('content', ''),
                              metadata=msg.get('metadata', None)
                          ))
                      else:
-                         # Already a ChatMessage
                          converted_history.append(msg)

                  new_history, cleared_input = self.chat_handler.process_multimodal_message(message, converted_history)
@@ -332,125 +316,168 @@ class UIComponents:
              return history, gr.MultimodalTextbox(value=None, interactive=False)

          def enable_input():
-             """Re-enable input after processing"""
              return gr.MultimodalTextbox(interactive=True)

-
-
-
-
-
          )

-         # Connect provider selection
          self.provider_dropdown.change(
              handle_provider_change,
              inputs=[self.provider_dropdown],
-             outputs=[self.model_dropdown]
          )

-
-
-             handle_provider_configuration,
              inputs=[self.provider_dropdown, self.model_dropdown],
-             outputs=[self.
-         ).then(
-             lambda: self._update_backend_status(),
-             outputs=[self.backend_status]
          )

-         #
-
              submit_message,
              inputs=[self.chat_input, chatbot],
              outputs=[chatbot, self.chat_input]
          )
-
-
-         # Server management event handlers
-         def update_server_display():
-             """Update the server status display in sidebar"""
-             server_count = len(self.mcp_client.servers)
-             count_text = f"**Connected Servers**: {server_count}"
-
-             if self.mcp_client.servers:
-                 server_list = "\n".join([f"• **{name}**" for name in self.mcp_client.servers.keys()])
-                 return count_text, server_list
-             else:
-                 return count_text, "*No servers connected*\n\nAdd servers below."
-
-         def handle_add_server(server_title, selected_space):
-             """Handle adding a server and update displays"""
-             # Check if both title and space are provided
-             if not server_title or not server_title.strip():
-                 return "❌ Please provide a server title", "", *update_server_display(), ""
-
-             if not selected_space:
-                 return "❌ Please select a HuggingFace space from the dropdown", "", *update_server_display(), ""
-
-             # Use the selected space from dropdown
-             status_msg, details_html = self.server_manager.add_custom_server(server_title.strip(), selected_space)
-
-             # Update sidebar server display
-             count_text, list_text = update_server_display()
-
-             return status_msg, details_html, count_text, list_text, ""  # Clear server name input

-
-             """Handle refresh status button"""
-             count_text, accordions_html = self.server_manager.get_server_status()
-             return count_text, accordions_html
-
-         def handle_refresh_spaces():
-             """Handle refresh MCP spaces button"""
-             from mcp_spaces_finder import refresh_mcp_spaces, _finder
-
-             # Clear cache and get fresh spaces
-             refresh_mcp_spaces()
-             spaces = _finder.get_mcp_spaces()
-             spaces_count = len(spaces)
-
-             # Update dropdown choices with new count
-             return gr.Dropdown(
-                 choices=spaces,
-                 value=None,
-                 label=f"🤖 Select from {spaces_count} Available MCP Server Spaces",
-                 info="Choose a HuggingFace space that provides MCP server functionality"
-             )
-
-         # Connect server management events
          self.add_server_btn.click(
-
-
-             outputs=[
-                 self.add_server_output,
-                 self.add_server_details,
-                 self.server_count_display,
-                 self.server_list_display,
-                 self.server_name  # Clear server name input
-             ]
          )

-
-
-
          )

-
-
-
-             outputs=[self.mcp_dropdown]
          )
-
-     def _update_backend_status(self):
-         """Update backend status based on current configuration"""
-         if self.current_backend == "anthropic":
-             if AppConfig.ANTHROPIC_API_KEY:
-                 return "**Current Backend**: Anthropic Claude Sonnet 4 ✅"
-             else:
-                 return "**Current Backend**: Anthropic Claude (❌ API key needed)"
-         else:
-             if self.current_provider and self.current_model:
-                 return f"**Current Backend**: {self.current_provider}/{self.current_model} ✅"
-             else:
-                 return "**Current Backend**: HF Inference Providers (⚙️ Configure provider)"
Added (new version):

@@ -1,32 +1,62 @@
  """
+ UI Components for Universal MCP Client - Fixed with optimal MCP guidance
  """
  import gradio as gr
  from gradio import ChatMessage
  from typing import Tuple, List, Dict, Any
+ import os
+ import logging
+ import traceback
+ from openai import OpenAI

+ from config import AppConfig, CUSTOM_CSS, HF_HUB_AVAILABLE
  from chat_handler import ChatHandler
  from server_manager import ServerManager
  from mcp_client import UniversalMCPClient

+ # Import HuggingFace Hub for login functionality
+ if HF_HUB_AVAILABLE:
+     from huggingface_hub import login, logout, whoami
+     from huggingface_hub.utils import HfHubHTTPError
+
+ logger = logging.getLogger(__name__)
+
  class UIComponents:
+     """Manages Gradio UI components with improved MCP server management"""

      def __init__(self, mcp_client: UniversalMCPClient):
          self.mcp_client = mcp_client
          self.chat_handler = ChatHandler(mcp_client)
          self.server_manager = ServerManager(mcp_client)
+         self.current_user = None
+
+     def _initialize_default_servers(self):
+         """Initialize default MCP servers on app startup"""
+         default_servers = [
+             ("background removal", "ysharma/background-removal-mcp"),
+             ("text to video", "ysharma/ltx-video-distilled"),
+             ("text to speech", "ysharma/Kokoro-TTS-mcp-test"),
+             ("text to image", "ysharma/dalle-3-xl-lora-v2")
+         ]
+
+         logger.info("🚀 Initializing default MCP servers...")

+         for server_name, space_id in default_servers:
+             try:
+                 status_msg, _ = self.server_manager.add_custom_server(server_name, space_id)
+                 if "✅" in status_msg:
+                     logger.info(f"✅ Added default server: {server_name}")
+                 else:
+                     logger.warning(f"⚠️ Failed to add default server {server_name}: {status_msg}")
+             except Exception as e:
+                 logger.error(f"❌ Error adding default server {server_name}: {e}")
+
+         logger.info(f"📊 Initialized {len(self.mcp_client.servers)} default servers")

      def create_interface(self) -> gr.Blocks:
+         """Create the main Gradio interface with improved layout"""
          with gr.Blocks(
+             title="Universal MCP Client - HF Inference Powered",
              theme=getattr(gr.themes, AppConfig.GRADIO_THEME.title())(),
              fill_height=True,
              css=CUSTOM_CSS
@@ -39,292 +69,246 @@ class UIComponents:
              chatbot = self._create_main_chat_area()

              # Set up event handlers
+             self._setup_event_handlers(chatbot, demo)

          return demo

      def _create_sidebar(self):
+         """Create the sidebar with login, provider/model selection, and server management"""
          with gr.Sidebar(elem_id="main-sidebar"):
+             gr.Markdown("# 🤗 chat.gradio.app")
+
+             # HuggingFace Login Section
+             self._create_login_section()

+             # Provider and Model Selection with defaults
+             self._create_provider_model_selection()
+
+             # MCP Server Management
+             self._create_server_management_section()

              # Collapsible information section
              with gr.Accordion("📚 Guide & Info", open=False):
                  gr.Markdown("""
                  ## 🎯 How To Use
+                 1. **Login**: Log in with your HuggingFace account for API access
+                 2. **Add MCP Servers**: Connect to various AI tools on the 🤗 Hub
+                 3. **Enable/Disable Servers**: Use checkboxes to control which servers are active
+                 4. **Chat**: Interact with GPT-OSS and use connected MCP Servers

+                 ## 💭 Features
+                 - **GPT-OSS Models**: OpenAI's latest open-source reasoning models (128k context)
+                 - **MCP Integration**: Connect to thousands of AI apps on the Hub via the MCP protocol
+                 - **Multi-Provider**: Access via Cerebras, Fireworks, Together AI, and others
+                 - **Media Support**: Automatic embedding of media (images, audio, video, and more)
                  """)

+     def _create_login_section(self):
+         """Create HuggingFace OAuth login section"""
+         with gr.Group(elem_classes="login-section"):
+             gr.Markdown("## 🔑 Authentication", container=True)
+             self.login_button = gr.LoginButton(
+                 value="Sign in with Hugging Face",
+                 size="sm"
+             )
+             self.login_status = gr.Markdown("⚪ Please sign in to access Inference Providers", container=True)
+
+     def _create_provider_model_selection(self):
+         """Create provider and model selection dropdowns with defaults"""
+         with gr.Group(elem_classes="provider-model-selection"):
+             gr.Markdown("## 🚀 Inference Configuration", container=True)

+             # Provider dropdown with default selection
              provider_choices = list(AppConfig.INFERENCE_PROVIDERS.keys())
              self.provider_dropdown = gr.Dropdown(
                  choices=provider_choices,
+                 label="🔧 Inference Provider",
+                 value="cerebras",  # Default to Cerebras
                  info="Choose your preferred inference provider"
              )

+             # Model dropdown (will be populated based on provider)
              self.model_dropdown = gr.Dropdown(
                  choices=[],
+                 label="🤖 Model",
                  value=None,
+                 info="Select GPT OSS model variant"
              )

+             # Status display
+             self.api_status = gr.Markdown("⚪ Select provider and model to begin", container=True)

      def _create_server_management_section(self):
+         """Create the server management section with checkboxes and guidance"""
+         with gr.Group():
+             gr.Markdown("## 🔧 MCP Servers", container=True)
+
+             # ADDED: Optimal server count guidance
+             gr.Markdown("""
+             <div style="background: #f0f8ff; padding: 10px; border-radius: 5px; border-left: 3px solid #4169e1; margin-bottom: 10px;">
+             <strong>💡 Best Practice:</strong> For optimal performance, we recommend keeping
+             <strong>3-6 MCP servers</strong> enabled at once. Too many servers can:
+             • Increase context usage (reducing available tokens for conversation)
+             • Potentially confuse the model when selecting tools
+             • Slow down response times
+
+             You can add more servers, but selectively enable only the ones you need for your current task.
+             </div>
+             """, container=True)
+
+             # Server controls
+             with gr.Row():
+                 self.add_server_btn = gr.Button("Add MCP Server", variant="primary", size="sm")
+                 self.remove_all_btn = gr.Button("Remove All", variant="secondary", size="sm")
+
+             # Add a save button (initially hidden)
+             self.save_server_btn = gr.Button("Save Server", variant="primary", size="sm", visible=False)

+             # MCP server selection
              from mcp_spaces_finder import _finder
              spaces = _finder.get_mcp_spaces()
              self.mcp_dropdown = gr.Dropdown(
                  choices=spaces,
+                 label=f"**Available MCP Servers ({len(spaces)})**",
                  value=None,
+                 info="Choose from HuggingFace spaces",
+                 allow_custom_value=True,
+                 visible=False
              )

              self.server_name = gr.Textbox(
+                 label="Server Title",
+                 placeholder="e.g., Text to Image Generator",
+                 visible=False
              )

+             # Server status and controls
+             self.server_checkboxes = gr.CheckboxGroup(
+                 label="Active Servers (Check to enable)",
+                 choices=[],
+                 value=[],
+                 info="✅ Enabled servers can be used | ⬜ Disabled servers are ignored"
+             )

+             self.add_server_output = gr.Markdown("", visible=False, container=True)
      def _create_main_chat_area(self) -> gr.Chatbot:
+         """Create the main chat area"""
          with gr.Column(elem_classes="main-content"):
              chatbot = gr.Chatbot(
+                 label="Universal MCP-Powered AI Assistant",
                  show_label=False,
+                 type="messages",
+                 scale=1,
                  show_copy_button=True,
                  avatar_images=None,
                  value=[
                      ChatMessage(
                          role="assistant",
+                         content="""Welcome! I'm your MCP-powered AI assistant using OpenAI's GPT-OSS models via HuggingFace Inference Providers.
+
+ 🎉 **Pre-loaded MCP servers ready to use:**
+ - **background removal** - Remove backgrounds from images
+ - **text to video** - Generate videos from text descriptions
+ - **text to speech** - Convert text to natural speech
+ - **text to image** - Create images from text prompts
+
+ You can start using these servers right away, add more servers, or remove them as needed. Try asking me to generate an image, create speech, or any other task!"""
                      )
                  ]
              )
+
+
+
              with gr.Column(scale=0, elem_classes="input-area"):
                  self.chat_input = gr.MultimodalTextbox(
                      interactive=True,
                      file_count="multiple",
+                     placeholder="Enter message or upload files...",
                      show_label=False,
                      sources=["upload", "microphone"],
+                     file_types=None
                  )

          return chatbot

+     def _setup_event_handlers(self, chatbot: gr.Chatbot, demo: gr.Blocks):
+         """Set up all event handlers"""

+         # OAuth profile handler
+         def handle_oauth_profile(profile: gr.OAuthProfile | None, token: gr.OAuthToken | None):
+             if profile is None:
+                 return "⚪ Please sign in to access Inference Providers"
+
+             logger.info(f"👤 OAuth profile received for user: {profile.name}")
+
+             if token and token.token:
+                 logger.info("🔑 OAuth token received, updating HF client...")
+                 os.environ["HF_TOKEN"] = token.token
+                 try:
+                     self.mcp_client.hf_client = OpenAI(
+                         base_url="https://router.huggingface.co/v1",
+                         api_key=token.token
+                     )
+                     logger.info("✅ HuggingFace Inference client updated with OAuth token")
+                 except Exception as e:
+                     logger.error(f"❌ Failed to update HF client: {e}")
+
+             return f"✅ Signed in as: **{profile.name}**"
+
+         # Provider selection with auto-model loading
+         def handle_provider_change(provider_id):
+             if not provider_id:
+                 return gr.Dropdown(choices=[], value=None), "⚪ Select provider first"
+
+             available_models = AppConfig.get_available_models_for_provider(provider_id)
+             model_choices = [(AppConfig.AVAILABLE_MODELS[model]["name"], model) for model in available_models]
+
+             # Auto-select 120b model if available
+             default_model = "openai/gpt-oss-120b" if "openai/gpt-oss-120b" in available_models else (available_models[0] if available_models else None)
+
+             # Get context info for status
+             if default_model:
+                 model_info = AppConfig.AVAILABLE_MODELS.get(default_model, {})
+                 context_length = model_info.get("context_length", 128000)
+                 status_msg = f"✅ Provider selected, model auto-selected ({context_length:,} token context)"
              else:
+                 status_msg = "✅ Provider selected, please select a model"

              return (
+                 gr.Dropdown(choices=model_choices, value=default_model, label="🤖 Model"),
+                 status_msg
              )

+         # Model selection
+         def handle_model_change(provider_id, model_id):
+             if not provider_id or not model_id:
+                 return "⚪ Select both provider and model"

+             self.mcp_client.set_model_and_provider(provider_id, model_id)

+             # Get model info
+             model_info = AppConfig.AVAILABLE_MODELS.get(model_id, {})
+             context_length = model_info.get("context_length", 128000)
+             active_params = model_info.get("active_params", "N/A")

+             if self.mcp_client.hf_client:
+                 return f"✅ Ready! Using {active_params} active params, {context_length:,} token context"
              else:
+                 return "❌ Please login first"

+         # Chat handlers
          def submit_message(message, history):
              if message and (message.get("text", "").strip() or message.get("files", [])):
                  converted_history = []
                  for msg in history:
                      if isinstance(msg, dict):
                          converted_history.append(ChatMessage(
                              role=msg.get('role', 'assistant'),
                              content=msg.get('content', ''),
                              metadata=msg.get('metadata', None)
                          ))
                      else:
                          converted_history.append(msg)

                  new_history, cleared_input = self.chat_handler.process_multimodal_message(message, converted_history)

@@ -332,125 +316,168 @@ class UIComponents:
              return history, gr.MultimodalTextbox(value=None, interactive=False)

          def enable_input():
              return gr.MultimodalTextbox(interactive=True)

+         def show_add_server_fields():
+             return [
+                 gr.Dropdown(visible=True),     # mcp_dropdown
+                 gr.Textbox(visible=True),      # server_name
+                 gr.Button(interactive=False),  # add_server_btn - disable it
+                 gr.Button(visible=True)        # save_server_btn - show it
+             ]
+
+         def hide_add_server_fields():
+             return [
+                 gr.Dropdown(visible=False, value=None),  # mcp_dropdown
+                 gr.Textbox(visible=False, value=""),     # server_name
+                 gr.Button(interactive=True),             # add_server_btn - re-enable it
+                 gr.Button(visible=False)                 # save_server_btn - hide it
+             ]
+
+         def handle_add_server(server_title, selected_space):
+             if not server_title or not selected_space:
+                 return [
+                     gr.Dropdown(visible=False, value=None),
+                     gr.Textbox(visible=False, value=""),
+                     gr.Button(interactive=True),  # Re-enable add button
+                     gr.Button(visible=False),     # Hide save button
+                     gr.CheckboxGroup(choices=list(self.mcp_client.servers.keys()),
+                                      value=[name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]),
+                     gr.Markdown("❌ Please provide both server title and space selection", visible=True)
+                 ]
+
+             try:
+                 status_msg, _ = self.server_manager.add_custom_server(server_title.strip(), selected_space)
+
+                 # Update checkboxes with all servers
+                 server_choices = list(self.mcp_client.servers.keys())
+                 enabled_servers = [name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]
+
+                 # Check if we have many servers and show a warning
+                 warning_msg = ""
+                 if len(enabled_servers) > 6:
+                     warning_msg = "\n\n⚠️ **Note:** You have more than 6 servers enabled. Consider disabling some for better performance."
+
+                 return [
+                     gr.Dropdown(visible=False, value=None),
+                     gr.Textbox(visible=False, value=""),
+                     gr.Button(interactive=True),  # Re-enable add button
+                     gr.Button(visible=False),     # Hide save button
+                     gr.CheckboxGroup(choices=server_choices, value=enabled_servers),
+                     gr.Markdown(status_msg + warning_msg, visible=True)
+                 ]
+
+             except Exception as e:
+                 logger.error(f"Error adding server: {e}")
+                 return [
+                     gr.Dropdown(visible=False, value=None),
+                     gr.Textbox(visible=False, value=""),
+                     gr.Button(interactive=True),  # Re-enable add button
+                     gr.Button(visible=False),     # Hide save button
+                     gr.CheckboxGroup(choices=list(self.mcp_client.servers.keys()),
+                                      value=[name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]),
+                     gr.Markdown(f"❌ Error: {str(e)}", visible=True)
+                 ]
+
+         def handle_server_toggle(enabled_servers):
+             """Handle enabling/disabling servers via checkboxes"""
+             # Update enabled status for all servers
+             for server_name in self.mcp_client.servers.keys():
+                 self.mcp_client.enable_server(server_name, server_name in enabled_servers)
+
+             enabled_count = len(enabled_servers)
+
+             # Provide feedback based on count
+             if enabled_count == 0:
+                 message = "ℹ️ No servers enabled - chatbot will use native capabilities only"
+             elif enabled_count <= 6:
+                 message = f"✅ {enabled_count} server{'s' if enabled_count != 1 else ''} enabled - optimal configuration"
+             else:
+                 message = f"⚠️ {enabled_count} servers enabled - consider reducing to 3-6 for better performance"
+
+             return gr.Markdown(message, visible=True)
+
+         def handle_remove_all():
+             """Remove all MCP servers"""
+             count = self.mcp_client.remove_all_servers()
+             return [
+                 gr.CheckboxGroup(choices=[], value=[]),
+                 gr.Markdown(f"✅ Removed all {count} servers", visible=True)
+             ]
+
+         # Load handler to initialize default MCP servers
+         def initialize_defaults():
+             """Initialize default servers and update UI on app load"""
+             self._initialize_default_servers()
+
+             # Return updated checkboxes with the default servers
+             server_choices = list(self.mcp_client.servers.keys())
+             enabled_servers = [name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]
+
+             return gr.CheckboxGroup(
+                 choices=server_choices,
+                 value=enabled_servers,
+                 label=f"Active Servers ({len(server_choices)} loaded)"
+             )
+
+         # Connect OAuth
+         demo.load(
+             fn=handle_oauth_profile,
+             outputs=[self.login_status]
          )

+         # Connect provider/model dropdowns with auto-selection on load
+         demo.load(
+             fn=lambda: handle_provider_change("cerebras"),
+             outputs=[self.model_dropdown, self.api_status]
+         )
+
+         # Initialize default MCP servers on load
+         demo.load(
+             fn=initialize_defaults,
+             outputs=[self.server_checkboxes]
+         )
+
          self.provider_dropdown.change(
              handle_provider_change,
              inputs=[self.provider_dropdown],
+             outputs=[self.model_dropdown, self.api_status]
          )

+         self.model_dropdown.change(
+             handle_model_change,
              inputs=[self.provider_dropdown, self.model_dropdown],
+             outputs=[self.api_status]
          )

+         # Connect chat
+         chat_submit = self.chat_input.submit(
              submit_message,
              inputs=[self.chat_input, chatbot],
              outputs=[chatbot, self.chat_input]
          )
+         chat_submit.then(enable_input, None, [self.chat_input])

+         # Connect server management with proper button state handling
          self.add_server_btn.click(
+             fn=show_add_server_fields,
+             outputs=[self.mcp_dropdown, self.server_name, self.add_server_btn, self.save_server_btn]
          )

+         # Connect save button
+         self.save_server_btn.click(
+             fn=handle_add_server,
+             inputs=[self.server_name, self.mcp_dropdown],
+             outputs=[self.mcp_dropdown, self.server_name, self.add_server_btn, self.save_server_btn, self.server_checkboxes, self.add_server_output]
+         )
+
+         self.server_checkboxes.change(
+             handle_server_toggle,
+             inputs=[self.server_checkboxes],
+             outputs=[self.add_server_output]
          )

+         self.remove_all_btn.click(
+             handle_remove_all,
+             outputs=[self.server_checkboxes, self.add_server_output]
          )
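A note on the OAuth wiring above: handle_oauth_profile declares gr.OAuthProfile | None and gr.OAuthToken | None parameters, yet is registered via demo.load() with no inputs. Gradio treats these type hints specially and injects the signed-in user's profile and token automatically (both are None when nobody is logged in), which is why only outputs is passed. A minimal standalone sketch of the same pattern, assuming the app runs on a Space with OAuth enabled (run locally, the profile is always None):

    import gradio as gr

    def greet(profile: gr.OAuthProfile | None) -> str:
        # Gradio fills `profile` from the session; it is not a UI input.
        if profile is None:
            return "Not signed in"
        return f"Hello, {profile.name}!"

    with gr.Blocks() as demo:
        status = gr.Markdown()
        # No inputs: the OAuth profile is injected from the type hint alone
        demo.load(fn=greet, outputs=[status])

    demo.launch()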
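For context, a minimal sketch of how this class would be wired into a Space entry point; the app.py file name and the bare UniversalMCPClient() constructor call are assumptions, not shown in this diff:

    # app.py (hypothetical entry point)
    from mcp_client import UniversalMCPClient
    from ui_components import UIComponents

    def main():
        # Shared MCP client, passed into the UI layer from this diff
        client = UniversalMCPClient()
        ui = UIComponents(client)

        # create_interface() builds the sidebar, chat area, and event handlers,
        # including the demo.load() hooks that pre-register the default servers
        demo = ui.create_interface()
        demo.launch()  # gr.LoginButton needs hf_oauth: true in the Space metadata

    if __name__ == "__main__":
        main()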