"""
UI Components for Universal MCP Client - Enhanced with Inference Provider Support
"""
import gradio as gr
from gradio import ChatMessage
from typing import Tuple, List, Dict, Any
from config import AppConfig, CUSTOM_CSS
from chat_handler import ChatHandler
from server_manager import ServerManager
from mcp_client import UniversalMCPClient
class UIComponents:
"""Manages Gradio UI components and event handlers with Inference Provider support"""
def __init__(self, mcp_client: UniversalMCPClient):
self.mcp_client = mcp_client
self.chat_handler = ChatHandler(mcp_client)
self.server_manager = ServerManager(mcp_client)
# State for current LLM backend
self.current_backend = "anthropic" # "anthropic" or "hf_inference"
self.current_provider = None
self.current_model = None
def create_interface(self) -> gr.Blocks:
"""Create the main Gradio interface with provider selection"""
with gr.Blocks(
title="Universal MCP Client",
theme=getattr(gr.themes, AppConfig.GRADIO_THEME.title())(),
fill_height=True,
css=CUSTOM_CSS
) as demo:
# Create sidebar
self._create_sidebar()
# Create main chat area
chatbot = self._create_main_chat_area()
# Set up event handlers
self._setup_event_handlers(chatbot)
return demo
def _create_sidebar(self):
"""Create the sidebar with LLM provider selection and server management"""
with gr.Sidebar(elem_id="main-sidebar"):
gr.Markdown("# Gradio.chat.app")
# LLM Backend Selection Section
self._create_llm_backend_selection()
# Collapsible information section
            with gr.Accordion("📚 Guide & Info", open=False):
                gr.Markdown("""
                ## 🎯 How To Use
                - **Choose LLM Backend**: Select between Anthropic Claude and HuggingFace Inference Providers
                - **Chat with the LLM**: Interact with your selected model
                - **MCP Integration**: Add MCP servers for enhanced capabilities
                - **Subscribe**: PRO subscribers get higher usage limits on various services

                ## 🚀 New UI Features
                - **Provider Selection**: Choose from multiple inference providers
                - **Tool Usage**: See tool calls in collapsible accordions with 🔧 icons
                - **Results**: View tool results in nested thoughts with 📊 icons
                - **Media Output**: Images, audio, and videos display separately from text
                - **Real-time Status**: Watch tools execute with pending/done status indicators
                """)
# Current backend status
self.backend_status = gr.Markdown("**Current Backend**: Not configured")
# Server management
self._create_server_management_section()
def _create_llm_backend_selection(self):
"""Create LLM backend selection interface"""
        gr.Markdown("## 🤖 LLM Backend Selection")
# Radio buttons for backend selection
self.backend_radio = gr.Radio(
choices=[
("Anthropic Claude Sonnet 4", "anthropic"),
("HuggingFace Inference Providers", "hf_inference")
],
value="anthropic",
label="Choose LLM Backend",
info="Select your preferred language model backend"
)
# Anthropic configuration section
with gr.Column(visible=True, elem_classes="provider-selection anthropic-config") as self.anthropic_config:
            gr.Markdown("### 🔹 Anthropic Claude Configuration")
# Check if API key is available
if AppConfig.ANTHROPIC_API_KEY:
                self.anthropic_status = gr.Markdown("✅ **Status**: API key configured")
else:
                self.anthropic_status = gr.Markdown("""
                ❌ **Status**: API key not found

                **Setup Instructions**:
                1. Go to Space Settings → Secrets
                2. Add `ANTHROPIC_API_KEY` with your Anthropic API key
                3. Restart the space
                """)
# HuggingFace Inference Provider configuration section
with gr.Column(visible=False, elem_classes="provider-selection hf-config") as self.hf_config:
            gr.Markdown("### 🔸 HuggingFace Inference Provider Configuration")
# Check if HF token is available
if AppConfig.HF_TOKEN:
                self.hf_status = gr.Markdown("✅ **Status**: HF token configured")
else:
                self.hf_status = gr.Markdown("""
                ❌ **Status**: HF token not found

                **Setup Instructions**:
                1. Go to Space Settings → Secrets
                2. Add `HF_TOKEN` with your HuggingFace token
                3. Restart the space
                """)
# Provider dropdown
provider_choices = list(AppConfig.INFERENCE_PROVIDERS.keys())
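            # Assumption: AppConfig.INFERENCE_PROVIDERS maps provider names to lists
            # of model ids, e.g. {"provider-name": ["org/model-id", ...]}, and
            # AppConfig.get_provider_models(provider) returns the list for one provider.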
self.provider_dropdown = gr.Dropdown(
choices=provider_choices,
                label="🌐 Select Inference Provider",
value=provider_choices[0] if provider_choices else None,
info="Choose your preferred inference provider"
)
# Model dropdown (initially empty, populated based on provider)
self.model_dropdown = gr.Dropdown(
choices=[],
                label="🤖 Select Model",
value=None,
info="Choose the specific model to use"
)
# Configure button
self.configure_btn = gr.Button(
"Configure Inference Provider",
variant="primary",
size="sm"
)
self.provider_config_status = gr.Textbox(
label="Configuration Status",
interactive=False,
visible=False
)
def _create_server_management_section(self):
"""Create the server management section in sidebar"""
        gr.Markdown("## 🔧 MCP Server Management")
# Server status
self.server_count_display = gr.Markdown(f"**Connected Servers**: {len(self.mcp_client.servers)}")
if self.mcp_client.servers:
            server_list = "\n".join([f"• **{name}**" for name in self.mcp_client.servers.keys()])
self.server_list_display = gr.Markdown(server_list)
else:
self.server_list_display = gr.Markdown("*No servers connected*\n\nAdd servers below.")
        with gr.Accordion("⚙️ Manage MCP Servers", open=False):
gr.Markdown("**Add MCP Servers**")
# Get MCP spaces count for dropdown label
from mcp_spaces_finder import _finder
spaces = _finder.get_mcp_spaces()
spaces_count = len(spaces)
# Create MCP spaces dropdown
self.mcp_dropdown = gr.Dropdown(
choices=spaces,
                label=f"🤗 Select from {spaces_count} Available MCP Servers",
value=None,
info="Choose a HuggingFace space that provides MCP server functionality",
allow_custom_value=True
)
# Server title input
            self.server_name = gr.Textbox(
                label="Give Server a Title",
                placeholder="For example: Text to Image Generator"
            )
self.add_server_btn = gr.Button("Add Server", variant="primary")
self.add_server_output = gr.Textbox(label="Status", interactive=False, container=False)
self.add_server_details = gr.HTML(label="Details")
# Refresh button for MCP spaces
            self.refresh_spaces_btn = gr.Button("🔄 Refresh Available Spaces", variant="secondary", size="sm")
self.status_btn = gr.Button("Refresh Status", variant="secondary")
self.status_count = gr.Markdown("**Total MCP Servers**: 0")
self.status_output = gr.HTML()
def _create_main_chat_area(self) -> gr.Chatbot:
"""Create the main chat area with ChatMessage support"""
with gr.Column(elem_classes="main-content"):
# Chatbot takes most of the space - configured for ChatMessage
chatbot = gr.Chatbot(
label="Universal MCP-Powered Multimodal Chatbot",
show_label=False,
type="messages", # Essential for ChatMessage support
scale=1, # Expand to fill available space
show_copy_button=True,
avatar_images=None,
# Initial welcome message with proper ChatMessage format
value=[
ChatMessage(
role="assistant",
content="Welcome! I'm your MCP-powered AI assistant. Please configure your LLM backend in the sidebar to get started. I can help you with various tasks using connected MCP servers!"
)
]
)
# Input area at bottom - fixed size
with gr.Column(scale=0, elem_classes="input-area"):
self.chat_input = gr.MultimodalTextbox(
interactive=True,
file_count="multiple",
placeholder="Enter message or upload files (images, audio, video, documents)...",
show_label=False,
sources=["upload", "microphone"],
file_types=None # Accept all file types
)
return chatbot
def _setup_event_handlers(self, chatbot: gr.Chatbot):
"""Set up all event handlers for the interface"""
# Backend selection event handlers
def handle_backend_change(backend_choice):
"""Handle LLM backend selection change"""
self.current_backend = backend_choice
# Update visibility of configuration sections
anthropic_visible = (backend_choice == "anthropic")
hf_visible = (backend_choice == "hf_inference")
# Update backend status
            if backend_choice == "anthropic":
                if AppConfig.ANTHROPIC_API_KEY:
                    status = "**Current Backend**: Anthropic Claude Sonnet 4 ✅"
                else:
                    status = "**Current Backend**: Anthropic Claude (❌ API key needed)"
            else:
                if AppConfig.HF_TOKEN:
                    status = "**Current Backend**: HF Inference Providers (⚙️ Configure provider)"
                else:
                    status = "**Current Backend**: HF Inference Providers (❌ HF token needed)"
return (
gr.update(visible=anthropic_visible), # anthropic_config
gr.update(visible=hf_visible), # hf_config
status # backend_status
)
def handle_provider_change(provider):
"""Handle inference provider change"""
if not provider:
return gr.update(choices=[], value=None)
models = AppConfig.get_provider_models(provider)
return gr.update(choices=models, value=models[0] if models else None)
        def handle_provider_configuration(provider, model):
            """Handle inference provider configuration"""
            if not provider or not model:
                return gr.update(value="❌ Please select both provider and model", visible=True)
            if not AppConfig.HF_TOKEN:
                return gr.update(value="❌ HF_TOKEN not configured. Please add it to space secrets.", visible=True)
            # Configure the MCP client with the selected provider
            success = self.mcp_client.configure_inference_provider(provider, model)
            if success:
                self.current_provider = provider
                self.current_model = model
                # Re-point the chat handler at the freshly configured client
                self.chat_handler.mcp_client = self.mcp_client
                return gr.update(value=f"✅ Configured {provider} with {model}", visible=True)
            return gr.update(value="❌ Failed to configure inference provider", visible=True)
# Chat event handlers
def submit_message(message, history):
"""Handle message submission with ChatMessage support"""
# Check if backend is properly configured
if self.current_backend == "anthropic" and not AppConfig.ANTHROPIC_API_KEY:
                error_msg = "❌ Please configure Anthropic API key in space settings first."
history.append(ChatMessage(role="assistant", content=error_msg))
return history, gr.MultimodalTextbox(value=None, interactive=False)
if self.current_backend == "hf_inference" and (not self.current_provider or not self.current_model):
                error_msg = "❌ Please configure HuggingFace inference provider first."
history.append(ChatMessage(role="assistant", content=error_msg))
return history, gr.MultimodalTextbox(value=None, interactive=False)
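            # Only process messages that contain text or at least one uploaded file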
if message and (message.get("text", "").strip() or message.get("files", [])):
# Convert existing history to ChatMessage objects if needed
converted_history = []
for msg in history:
if isinstance(msg, dict):
# Convert dict to ChatMessage
converted_history.append(ChatMessage(
role=msg.get('role', 'assistant'),
content=msg.get('content', ''),
metadata=msg.get('metadata', None)
))
else:
# Already a ChatMessage
converted_history.append(msg)
new_history, cleared_input = self.chat_handler.process_multimodal_message(message, converted_history)
return new_history, cleared_input
return history, gr.MultimodalTextbox(value=None, interactive=False)
def enable_input():
"""Re-enable input after processing"""
return gr.MultimodalTextbox(interactive=True)
# Connect backend selection events
self.backend_radio.change(
handle_backend_change,
inputs=[self.backend_radio],
outputs=[self.anthropic_config, self.hf_config, self.backend_status]
)
# Connect provider selection events
self.provider_dropdown.change(
handle_provider_change,
inputs=[self.provider_dropdown],
outputs=[self.model_dropdown]
)
# Connect provider configuration
self.configure_btn.click(
handle_provider_configuration,
inputs=[self.provider_dropdown, self.model_dropdown],
            outputs=[self.provider_config_status]  # handler returns a single gr.update
).then(
lambda: self._update_backend_status(),
outputs=[self.backend_status]
)
# Set up the chat flow - using submit event
chat_msg_enter = self.chat_input.submit(
submit_message,
inputs=[self.chat_input, chatbot],
outputs=[chatbot, self.chat_input]
)
chat_msg_enter.then(enable_input, None, [self.chat_input])
# Server management event handlers
def update_server_display():
"""Update the server status display in sidebar"""
server_count = len(self.mcp_client.servers)
count_text = f"**Connected Servers**: {server_count}"
if self.mcp_client.servers:
                server_list = "\n".join([f"• **{name}**" for name in self.mcp_client.servers.keys()])
return count_text, server_list
else:
return count_text, "*No servers connected*\n\nAdd servers below."
def handle_add_server(server_title, selected_space):
"""Handle adding a server and update displays"""
# Check if both title and space are provided
if not server_title or not server_title.strip():
                return "❌ Please provide a server title", "", *update_server_display(), ""
if not selected_space:
                return "❌ Please select a HuggingFace space from the dropdown", "", *update_server_display(), ""
# Use the selected space from dropdown
status_msg, details_html = self.server_manager.add_custom_server(server_title.strip(), selected_space)
# Update sidebar server display
count_text, list_text = update_server_display()
return status_msg, details_html, count_text, list_text, "" # Clear server name input
def handle_refresh_status():
"""Handle refresh status button"""
count_text, accordions_html = self.server_manager.get_server_status()
return count_text, accordions_html
def handle_refresh_spaces():
"""Handle refresh MCP spaces button"""
from mcp_spaces_finder import refresh_mcp_spaces, _finder
# Clear cache and get fresh spaces
refresh_mcp_spaces()
spaces = _finder.get_mcp_spaces()
spaces_count = len(spaces)
# Update dropdown choices with new count
return gr.Dropdown(
choices=spaces,
value=None,
                label=f"🤗 Select from {spaces_count} Available MCP Server Spaces",
info="Choose a HuggingFace space that provides MCP server functionality"
)
# Connect server management events
self.add_server_btn.click(
handle_add_server,
inputs=[self.server_name, self.mcp_dropdown],
outputs=[
self.add_server_output,
self.add_server_details,
self.server_count_display,
self.server_list_display,
self.server_name # Clear server name input
]
)
self.status_btn.click(
handle_refresh_status,
outputs=[self.status_count, self.status_output]
)
# Connect refresh spaces button
self.refresh_spaces_btn.click(
handle_refresh_spaces,
outputs=[self.mcp_dropdown]
)
def _update_backend_status(self):
"""Update backend status based on current configuration"""
        if self.current_backend == "anthropic":
            if AppConfig.ANTHROPIC_API_KEY:
                return "**Current Backend**: Anthropic Claude Sonnet 4 ✅"
            else:
                return "**Current Backend**: Anthropic Claude (❌ API key needed)"
        else:
            if self.current_provider and self.current_model:
                return f"**Current Backend**: {self.current_provider}/{self.current_model} ✅"
            else:
                return "**Current Backend**: HF Inference Providers (⚙️ Configure provider)"
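

# Minimal launch sketch, assuming UniversalMCPClient() takes no required
# constructor arguments (adjust to match how the client is built elsewhere).
if __name__ == "__main__":
    client = UniversalMCPClient()
    ui = UIComponents(client)
    demo = ui.create_interface()
    demo.launch()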