Update ui_components.py
ui_components.py (CHANGED: +222 -72)
@@ -1,5 +1,5 @@
 """
-UI Components for Universal MCP Client - …
+UI Components for Universal MCP Client - Enhanced with Inference Provider Support
 """
 import gradio as gr
 from gradio import ChatMessage
@@ -11,15 +11,20 @@ from server_manager import ServerManager
 from mcp_client import UniversalMCPClient
 
 class UIComponents:
-    """Manages Gradio UI components and event handlers with …
+    """Manages Gradio UI components and event handlers with Inference Provider support"""
 
     def __init__(self, mcp_client: UniversalMCPClient):
         self.mcp_client = mcp_client
         self.chat_handler = ChatHandler(mcp_client)
         self.server_manager = ServerManager(mcp_client)
+
+        # State for current LLM backend
+        self.current_backend = "anthropic"  # "anthropic" or "hf_inference"
+        self.current_provider = None
+        self.current_model = None
 
     def create_interface(self) -> gr.Blocks:
-        """Create the main Gradio interface"""
+        """Create the main Gradio interface with provider selection"""
         with gr.Blocks(
             title="Universal MCP Client",
             theme=getattr(gr.themes, AppConfig.GRADIO_THEME.title())(),
@@ -39,41 +44,129 @@
         return demo
 
     def _create_sidebar(self):
-        """Create the sidebar with server management"""
+        """Create the sidebar with LLM provider selection and server management"""
        with gr.Sidebar(elem_id="main-sidebar"):
            gr.Markdown("# Gradio.chat.app")
 
+            # LLM Backend Selection Section
+            self._create_llm_backend_selection()
+
            # Collapsible information section
            with gr.Accordion("π Guide & Info", open=False):
                gr.Markdown("""
                ## 🎯 How To Use
-                - **…
-                - **…
-                - **…
+                - **Choose LLM Backend**: Select between Anthropic Claude or HuggingFace Inference Providers
+                - **Chat with LLM**: Interact with your selected model
+                - **MCP Integration**: Add MCP servers for enhanced capabilities
+                - **Subscribe**: PRO subscribers get higher usage on various services
 
                ## π New UI Features
+                - **Provider Selection**: Choose from multiple inference providers
                - **Tool Usage**: See tool calls in collapsible accordions with 🔧 icons
                - **Results**: View tool results in nested thoughts with π icons
                - **Media Output**: Images, audio, and videos display separately from text
                - **Real-time Status**: Watch tools execute with pending/done status indicators
                """)
 
-            # …
-            gr.Markdown("…
-            self.server_count_display = gr.Markdown(f"**Connected Servers**: {len(self.mcp_client.servers)}")
-            …
-            else:
-                self.…
-            # …
-            …
+            # Current backend status
+            self.backend_status = gr.Markdown("**Current Backend**: Not configured")
 
+            # Server management
+            self._create_server_management_section()
+
+    def _create_llm_backend_selection(self):
+        """Create LLM backend selection interface"""
+        gr.Markdown("## 🤖 LLM Backend Selection")
+
+        # Radio buttons for backend selection
+        self.backend_radio = gr.Radio(
+            choices=[
+                ("Anthropic Claude Sonnet 4", "anthropic"),
+                ("HuggingFace Inference Providers", "hf_inference")
+            ],
+            value="anthropic",
+            label="Choose LLM Backend",
+            info="Select your preferred language model backend"
+        )
+
+        # Anthropic configuration section
+        with gr.Column(visible=True, elem_classes="provider-selection anthropic-config") as self.anthropic_config:
+            gr.Markdown("### 🔹 Anthropic Claude Configuration")
+
+            # Check if API key is available
+            if AppConfig.ANTHROPIC_API_KEY:
+                self.anthropic_status = gr.Markdown("✅ **Status**: API key configured")
+            else:
+                self.anthropic_status = gr.Markdown("""
+                ❌ **Status**: API key not found
+
+                **Setup Instructions**:
+                1. Go to Space Settings → Secrets
+                2. Add `ANTHROPIC_API_KEY` with your Anthropic API key
+                3. Restart the space
+                """)
+
+        # HuggingFace Inference Provider configuration section
+        with gr.Column(visible=False, elem_classes="provider-selection hf-config") as self.hf_config:
+            gr.Markdown("### 🔸 HuggingFace Inference Provider Configuration")
+
+            # Check if HF token is available
+            if AppConfig.HF_TOKEN:
+                self.hf_status = gr.Markdown("✅ **Status**: HF token configured")
+            else:
+                self.hf_status = gr.Markdown("""
+                ❌ **Status**: HF token not found
+
+                **Setup Instructions**:
+                1. Go to Space Settings → Secrets
+                2. Add `HF_TOKEN` with your HuggingFace token
+                3. Restart the space
+                """)
+
+            # Provider dropdown
+            provider_choices = list(AppConfig.INFERENCE_PROVIDERS.keys())
+            self.provider_dropdown = gr.Dropdown(
+                choices=provider_choices,
+                label="π Select Inference Provider",
+                value=provider_choices[0] if provider_choices else None,
+                info="Choose your preferred inference provider"
+            )
+
+            # Model dropdown (initially empty, populated based on provider)
+            self.model_dropdown = gr.Dropdown(
+                choices=[],
+                label="🤖 Select Model",
+                value=None,
+                info="Choose the specific model to use"
+            )
+
+            # Configure button
+            self.configure_btn = gr.Button(
+                "Configure Inference Provider",
+                variant="primary",
+                size="sm"
+            )
+
+            self.provider_config_status = gr.Textbox(
+                label="Configuration Status",
+                interactive=False,
+                visible=False
+            )
 
     def _create_server_management_section(self):
         """Create the server management section in sidebar"""
-        …
+        gr.Markdown("## 🔧 MCP Server Management")
+
+        # Server status
+        self.server_count_display = gr.Markdown(f"**Connected Servers**: {len(self.mcp_client.servers)}")
+
+        if self.mcp_client.servers:
+            server_list = "\n".join([f"• **{name}**" for name in self.mcp_client.servers.keys()])
+            self.server_list_display = gr.Markdown(server_list)
+        else:
+            self.server_list_display = gr.Markdown("*No servers connected*\n\nAdd servers below.")
+
+        with gr.Accordion("⚙️ Manage MCP Servers", open=False):
            gr.Markdown("**Add MCP Servers**")
 
            # Get MCP spaces count for dropdown label
@@ -122,7 +215,7 @@
            value=[
                ChatMessage(
                    role="assistant",
-                    content="Welcome! I'm your MCP-powered AI assistant. I can help you with various tasks using connected MCP servers…
+                    content="Welcome! I'm your MCP-powered AI assistant. Please configure your LLM backend in the sidebar to get started. I can help you with various tasks using connected MCP servers!"
                )
            ]
        )
@@ -141,11 +234,84 @@
        return chatbot
 
    def _setup_event_handlers(self, chatbot: gr.Chatbot):
-        """Set up all event handlers for the interface…
+        """Set up all event handlers for the interface"""
+
+        # Backend selection event handlers
+        def handle_backend_change(backend_choice):
+            """Handle LLM backend selection change"""
+            self.current_backend = backend_choice
+
+            # Update visibility of configuration sections
+            anthropic_visible = (backend_choice == "anthropic")
+            hf_visible = (backend_choice == "hf_inference")
+
+            # Update backend status
+            if backend_choice == "anthropic":
+                if AppConfig.ANTHROPIC_API_KEY:
+                    status = "**Current Backend**: Anthropic Claude Sonnet 4 ✅"
+                else:
+                    status = "**Current Backend**: Anthropic Claude (❌ API key needed)"
+            else:
+                if AppConfig.HF_TOKEN:
+                    status = "**Current Backend**: HF Inference Providers (⚙️ Configure provider)"
+                else:
+                    status = "**Current Backend**: HF Inference Providers (❌ HF token needed)"
+
+            return (
+                gr.update(visible=anthropic_visible),  # anthropic_config
+                gr.update(visible=hf_visible),         # hf_config
+                status                                 # backend_status
+            )
+
+        def handle_provider_change(provider):
+            """Handle inference provider change"""
+            if not provider:
+                return gr.update(choices=[], value=None)
+
+            models = AppConfig.get_provider_models(provider)
+            return gr.update(choices=models, value=models[0] if models else None)
+
+        def handle_provider_configuration(provider, model):
+            """Handle inference provider configuration"""
+            if not provider or not model:
+                return "❌ Please select both provider and model", gr.update(visible=True)
+
+            if not AppConfig.HF_TOKEN:
+                return "❌ HF_TOKEN not configured. Please add it to space secrets.", gr.update(visible=True)
+
+            # Configure the MCP client with the selected provider
+            success = self.mcp_client.configure_inference_provider(provider, model)
+
+            if success:
+                self.current_provider = provider
+                self.current_model = model
+
+                # Update chat handler
+                self.chat_handler.mcp_client = self.mcp_client
+
+                status_msg = f"✅ Configured {provider} with {model}"
+
+                # Update backend status
+                backend_status = f"**Current Backend**: {provider}/{model} ✅"
+
+                return status_msg, gr.update(visible=True)
+            else:
+                return "❌ Failed to configure inference provider", gr.update(visible=True)
 
        # Chat event handlers
        def submit_message(message, history):
            """Handle message submission with ChatMessage support"""
+            # Check if backend is properly configured
+            if self.current_backend == "anthropic" and not AppConfig.ANTHROPIC_API_KEY:
+                error_msg = "❌ Please configure Anthropic API key in space settings first."
+                history.append(ChatMessage(role="assistant", content=error_msg))
+                return history, gr.MultimodalTextbox(value=None, interactive=False)
+
+            if self.current_backend == "hf_inference" and (not self.current_provider or not self.current_model):
+                error_msg = "❌ Please configure HuggingFace inference provider first."
+                history.append(ChatMessage(role="assistant", content=error_msg))
+                return history, gr.MultimodalTextbox(value=None, interactive=False)
+
            if message and (message.get("text", "").strip() or message.get("files", [])):
                # Convert existing history to ChatMessage objects if needed
                converted_history = []
@@ -169,6 +335,30 @@
            """Re-enable input after processing"""
            return gr.MultimodalTextbox(interactive=True)
 
+        # Connect backend selection events
+        self.backend_radio.change(
+            handle_backend_change,
+            inputs=[self.backend_radio],
+            outputs=[self.anthropic_config, self.hf_config, self.backend_status]
+        )
+
+        # Connect provider selection events
+        self.provider_dropdown.change(
+            handle_provider_change,
+            inputs=[self.provider_dropdown],
+            outputs=[self.model_dropdown]
+        )
+
+        # Connect provider configuration
+        self.configure_btn.click(
+            handle_provider_configuration,
+            inputs=[self.provider_dropdown, self.model_dropdown],
+            outputs=[self.provider_config_status, self.provider_config_status]
+        ).then(
+            lambda: self._update_backend_status(),
+            outputs=[self.backend_status]
+        )
+
        # Set up the chat flow - using submit event
        chat_msg_enter = self.chat_input.submit(
            submit_message,
@@ -231,7 +421,7 @@
        # Connect server management events
        self.add_server_btn.click(
            handle_add_server,
-            inputs=[self.server_name, self.mcp_dropdown],
+            inputs=[self.server_name, self.mcp_dropdown],
            outputs=[
                self.add_server_output,
                self.add_server_details,
@@ -251,56 +441,16 @@
            handle_refresh_spaces,
            outputs=[self.mcp_dropdown]
        )
-
-        # Optional: Add a clear chat button for better UX
-        def clear_chat():
-            """Clear chat history and return to welcome message"""
-            return [
-                ChatMessage(
-                    role="assistant",
-                    content="Chat cleared! I'm ready to help you with new tasks using MCP servers."
-                )
-            ]
-
-        # You can add a clear button in the interface if needed
-        # clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary", size="sm")
-        # clear_btn.click(clear_chat, outputs=[chatbot])
 
-    def …
-        """…
-        …
-                    "id": "tool_1",
-                    "status": "done",
-                    "duration": 3.2,
-                    "log": "Server: text_to_image"
-                }
-            ),
-            ChatMessage(
-                role="assistant",
-                content="[{'image': {'path': '/tmp/generated_image.png', 'url': 'https://example.com/image.png'}}]",
-                metadata={
-                    "title": "π Tool Result",
-                    "parent_id": "tool_1",
-                    "status": "done"
-                }
-            ),
-            ChatMessage(
-                role="assistant",
-                content="I've generated a beautiful sunset image for you! The image shows a stunning sunset over mountains with warm orange and pink colors filling the sky."
-            ),
-            ChatMessage(
-                role="assistant",
-                content={"path": "https://example.com/image.png"}
-            )
-        ]
-
-        return sample_messages
+    def _update_backend_status(self):
+        """Update backend status based on current configuration"""
+        if self.current_backend == "anthropic":
+            if AppConfig.ANTHROPIC_API_KEY:
+                return "**Current Backend**: Anthropic Claude Sonnet 4 ✅"
+            else:
+                return "**Current Backend**: Anthropic Claude (❌ API key needed)"
+        else:
+            if self.current_provider and self.current_model:
+                return f"**Current Backend**: {self.current_provider}/{self.current_model} ✅"
+            else:
+                return "**Current Backend**: HF Inference Providers (⚙️ Configure provider)"
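`handle_provider_configuration` likewise calls `self.mcp_client.configure_inference_provider(provider, model)` and treats the return value as a boolean, but `mcp_client.py` is untouched by this commit. A plausible sketch, assuming the client wraps `huggingface_hub.InferenceClient` (whose constructor accepts `provider` and `api_key` in recent releases); only the method name and the bool contract come from the call site:

```python
# Hypothetical implementation of the method the diff calls; everything beyond
# the signature and return type is an assumption.
from huggingface_hub import InferenceClient

from config import AppConfig  # assumed import path


class UniversalMCPClient:
    def configure_inference_provider(self, provider: str, model: str) -> bool:
        """Route chat completions through the selected HF inference provider."""
        try:
            # provider= routes requests through the chosen inference provider
            self.hf_client = InferenceClient(provider=provider, api_key=AppConfig.HF_TOKEN)
            self.current_model = model
            return True
        except Exception:
            return False
```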
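The backend switcher itself is a standard Gradio pattern: `handle_backend_change` returns one `gr.update(visible=...)` per component listed in `outputs`, so selecting a radio option shows one configuration `gr.Column` and hides the other. A stripped-down, runnable version of the same wiring:

```python
# Minimal demo of the Radio -> Column visibility toggle used in _setup_event_handlers.
import gradio as gr

with gr.Blocks() as demo:
    backend = gr.Radio(
        choices=[("Anthropic Claude", "anthropic"), ("HF Inference Providers", "hf_inference")],
        value="anthropic",
        label="Choose LLM Backend",
    )
    with gr.Column(visible=True) as anthropic_col:
        gr.Markdown("Anthropic configuration goes here")
    with gr.Column(visible=False) as hf_col:
        gr.Markdown("HF provider configuration goes here")

    def toggle(choice):
        # One gr.update per output component, in the same order as `outputs`
        return (
            gr.update(visible=(choice == "anthropic")),
            gr.update(visible=(choice == "hf_inference")),
        )

    backend.change(toggle, inputs=[backend], outputs=[anthropic_col, hf_col])

if __name__ == "__main__":
    demo.launch()
```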
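The helper deleted at the bottom of the diff (its `def` line was truncated by the viewer) built demo messages showing Gradio's `ChatMessage` metadata contract for tool calls: the tool-call message carries `title`, `id`, `status`, `duration`, and `log`, and the result message links back via `parent_id`, which is what renders the nested accordions described in the sidebar guide. A reconstruction from the surviving deleted fragments; the function name and the first message's content are assumptions:

```python
# Reconstructed from the deleted sample-message helper; items marked "assumed"
# were lost to truncation or encoding damage in the diff view.
from gradio import ChatMessage

def build_sample_messages():  # assumed name; the original def line was truncated
    return [
        ChatMessage(
            role="assistant",
            content="Generating image...",  # assumed; not visible in the diff
            metadata={
                "title": "Tool Call: text_to_image",  # assumed; original title's emoji was lost
                "id": "tool_1",
                "status": "done",
                "duration": 3.2,
                "log": "Server: text_to_image",
            },
        ),
        ChatMessage(
            role="assistant",
            content="[{'image': {'path': '/tmp/generated_image.png', 'url': 'https://example.com/image.png'}}]",
            metadata={"title": "Tool Result", "parent_id": "tool_1", "status": "done"},
        ),
        ChatMessage(
            role="assistant",
            content="I've generated a beautiful sunset image for you! The image shows a stunning sunset over mountains with warm orange and pink colors filling the sky.",
        ),
        ChatMessage(role="assistant", content={"path": "https://example.com/image.png"}),
    ]
```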