Spaces:
Sleeping
Sleeping
| """ | |
| UI Components for Universal MCP Client - Fixed with optimal MCP guidance | |
| """ | |
| import gradio as gr | |
| from gradio import ChatMessage | |
| from typing import Tuple, List, Dict, Any | |
| import os | |
| import logging | |
| import traceback | |
| from openai import OpenAI | |
| from config import AppConfig, CUSTOM_CSS, HF_HUB_AVAILABLE | |
| from chat_handler import ChatHandler | |
| from server_manager import ServerManager | |
| from mcp_client import UniversalMCPClient | |
| # Import HuggingFace Hub for login functionality | |
| if HF_HUB_AVAILABLE: | |
| from huggingface_hub import login, logout, whoami | |
| from huggingface_hub.utils import HfHubHTTPError | |
| logger = logging.getLogger(__name__) | |
class UIComponents:
    """Manages Gradio UI components with improved MCP server management"""
    def __init__(self, mcp_client: UniversalMCPClient):
        # Shared MCP client; also used by the handler/manager collaborators below.
        self.mcp_client = mcp_client
        # Processes chat submissions (multimodal messages -> updated history).
        self.chat_handler = ChatHandler(mcp_client)
        # Adds/removes MCP servers on the shared client.
        self.server_manager = ServerManager(mcp_client)
        # OAuth user; None until sign-in completes.
        self.current_user = None
| def _initialize_default_servers(self): | |
| """Initialize default MCP servers on app startup""" | |
| default_servers = [ | |
| ("background removal", "ysharma/background-removal-mcp"), | |
| ("text to video", "ysharma/ltx-video-distilled"), | |
| ("text to speech", "ysharma/Kokoro-TTS-mcp-test"), | |
| ("text to image", "ysharma/dalle-3-xl-lora-v2") | |
| ] | |
| logger.info("🚀 Initializing default MCP servers...") | |
| for server_name, space_id in default_servers: | |
| try: | |
| status_msg, _ = self.server_manager.add_custom_server(server_name, space_id) | |
| if "✅" in status_msg: | |
| logger.info(f"✅ Added default server: {server_name}") | |
| else: | |
| logger.warning(f"⚠️ Failed to add default server {server_name}: {status_msg}") | |
| except Exception as e: | |
| logger.error(f"❌ Error adding default server {server_name}: {e}") | |
| logger.info(f"📊 Initialized {len(self.mcp_client.servers)} default servers") | |
| def create_interface(self) -> gr.Blocks: | |
| """Create the main Gradio interface with improved layout""" | |
| with gr.Blocks( | |
| title="Universal MCP Client - HF Inference Powered", | |
| theme=getattr(gr.themes, AppConfig.GRADIO_THEME.title())(), | |
| fill_height=True, | |
| css=CUSTOM_CSS | |
| ) as demo: | |
| # Create sidebar | |
| self._create_sidebar() | |
| # Create main chat area | |
| chatbot = self._create_main_chat_area() | |
| # Set up event handlers | |
| self._setup_event_handlers(chatbot, demo) | |
| return demo | |
    def _create_sidebar(self):
        """Create the sidebar with login, provider/model selection, and server management"""
        with gr.Sidebar(elem_id="main-sidebar"):
            gr.Markdown("# 🤗 chat.gradio.app")
            # HuggingFace Login Section
            self._create_login_section()
            # Provider and Model Selection with defaults
            self._create_provider_model_selection()
            # MCP Server Management
            self._create_server_management_section()
            # Collapsible information section (closed by default to save space)
            with gr.Accordion("📚 Guide & Info", open=False):
                gr.Markdown("""
                ## 🎯 How To Use
                1. **Login**: Login with your HuggingFace account for API access
                2. **Add MCP Servers**: Connect to various AI tools on 🤗Hub
                3. **Enable/Disable Servers**: Use checkboxes to control which servers are active
                4. **Chat**: Interact with GPT-OSS and use connected MCP Servers
                ## 💭 Features
                - **GPT-OSS Models**: OpenAI's latest open-source reasoning models (128k context)
                - **MCP Integration**: Connect to thousands of AI apps on Hub via MCP protocol
                - **Multi-Provider**: Access via Cerebras, Fireworks, Together AI, and others
                - **Media Support**: Automatic embedding of media -- images, audio, and video etc
                """)
    def _create_login_section(self):
        """Create HuggingFace OAuth login section"""
        with gr.Group(elem_classes="login-section"):
            gr.Markdown("## 🔑 Authentication", container=True)
            # gr.LoginButton drives the HF OAuth flow; the resulting profile/token
            # are consumed by handle_oauth_profile in _setup_event_handlers.
            self.login_button = gr.LoginButton(
                value="Sign in with Hugging Face",
                size="sm"
            )
            # Status line updated after the OAuth callback resolves.
            self.login_status = gr.Markdown("⚪ Please sign in to access Inference Providers", container=True)
    def _create_provider_model_selection(self):
        """Create provider and model selection dropdowns with defaults"""
        with gr.Group(elem_classes="provider-model-selection"):
            gr.Markdown("## 🚀 Inference Configuration", container=True)
            # Provider dropdown with default selection
            provider_choices = list(AppConfig.INFERENCE_PROVIDERS.keys())
            self.provider_dropdown = gr.Dropdown(
                choices=provider_choices,
                label="🔧 Inference Provider",
                value="cerebras",  # Default to Cerebras
                info="Choose your preferred inference provider"
            )
            # Model dropdown starts empty; choices are filled by the provider
            # change handler (and by the demo.load auto-selection).
            self.model_dropdown = gr.Dropdown(
                choices=[],
                label="🤖 Model",
                value=None,
                info="Select GPT OSS model variant"
            )
            # Status display reflecting provider/model readiness.
            self.api_status = gr.Markdown("⚪ Select provider and model to begin", container=True)
| def _create_server_management_section(self): | |
| """Create the server management section with checkboxes and guidance""" | |
| with gr.Group(): | |
| gr.Markdown("## 🔧 MCP Servers", container=True) | |
| # ADDED: Optimal server count guidance | |
| gr.Markdown(""" | |
| <div style="background: #f0f8ff; padding: 10px; border-radius: 5px; border-left: 3px solid #4169e1; margin-bottom: 10px;"> | |
| <strong>💡 Best Practice:</strong> For optimal performance, we recommend keeping | |
| <strong>3-6 MCP servers</strong> enabled at once. Too many servers can: | |
| • Increase context usage (reducing available tokens for conversation) | |
| • Potentially confuse the model when selecting tools | |
| • Slow down response times | |
| You can add more servers but selectively enable only the ones you need for your current task. | |
| </div> | |
| """, container=True) | |
| # Server controls | |
| with gr.Row(): | |
| self.add_server_btn = gr.Button("Add MCP Server", variant="primary", size="sm") | |
| self.remove_all_btn = gr.Button("Remove All", variant="secondary", size="sm") | |
| # Add a save button (initially hidden) | |
| self.save_server_btn = gr.Button("Save Server", variant="primary", size="sm", visible=False) | |
| # MCP server selection | |
| from mcp_spaces_finder import _finder | |
| spaces = _finder.get_mcp_spaces() | |
| self.mcp_dropdown = gr.Dropdown( | |
| choices=spaces, | |
| label=f"**Available MCP Servers ({len(spaces)}**)", | |
| value=None, | |
| info="Choose from HuggingFace spaces", | |
| allow_custom_value=True, | |
| visible=False | |
| ) | |
| self.server_name = gr.Textbox( | |
| label="Server Title", | |
| placeholder="e.g., Text to Image Generator", | |
| visible=False | |
| ) | |
| # Server status and controls | |
| self.server_checkboxes = gr.CheckboxGroup( | |
| label="Active Servers (Check to enable)", | |
| choices=[], | |
| value=[], | |
| info="✅ Enabled servers can be used | ⬜ Disabled servers are ignored" | |
| ) | |
| self.add_server_output = gr.Markdown("", visible=False, container=True) | |
    def _create_main_chat_area(self) -> gr.Chatbot:
        """Create the main chat area"""
        with gr.Column(elem_classes="main-content"):
            # type="messages" makes history entries role/content dicts, which
            # submit_message later converts back into ChatMessage objects.
            chatbot = gr.Chatbot(
                label="Universal MCP-Powered AI Assistant",
                show_label=False,
                type="messages",
                scale=1,
                show_copy_button=True,
                avatar_images=None,
                # Seed the conversation with a welcome message describing the
                # default servers loaded at startup.
                value=[
                    ChatMessage(
                        role="assistant",
                        content="""Welcome! I'm your MCP-powered AI assistant using OpenAI's GPT-OSS models via HuggingFace Inference Providers.
🎉 **Pre-loaded MCP servers ready to use:**
- **background removal** - Remove backgrounds from images
- **text to video** - Generate videos from text descriptions
- **text to speech** - Convert text to natural speech
- **text to image** - Create images from text prompts
You can start using these servers right away, add more servers, or remove them as needed. Try asking me to generate an image, create speech, or any other task!"""
                    )
                ]
            )
            with gr.Column(scale=0, elem_classes="input-area"):
                # Multimodal input: free text plus file uploads and microphone audio.
                self.chat_input = gr.MultimodalTextbox(
                    interactive=True,
                    file_count="multiple",
                    placeholder="Enter message or upload files...",
                    show_label=False,
                    sources=["upload", "microphone"],
                    file_types=None
                )
        return chatbot
    def _setup_event_handlers(self, chatbot: gr.Chatbot, demo: gr.Blocks):
        """Set up all event handlers"""

        # OAuth profile handler
        def handle_oauth_profile(profile: gr.OAuthProfile | None, token: gr.OAuthToken | None):
            # Gradio injects OAuthProfile/OAuthToken based on the parameter
            # annotations when this runs as a demo.load callback.
            if profile is None:
                return "⚪ Please sign in to access Inference Providers"
            logger.info(f"👤 OAuth profile received for user: {profile.name}")
            if token and token.token:
                logger.info("🔑 OAuth token received, updating HF client...")
                # Expose the token to libraries that read HF_TOKEN from the env.
                os.environ["HF_TOKEN"] = token.token
                try:
                    # Rebuild the OpenAI-compatible client against the HF router
                    # so inference calls use the signed-in user's token.
                    self.mcp_client.hf_client = OpenAI(
                        base_url="https://router.huggingface.co/v1",
                        api_key=token.token
                    )
                    logger.info("✅ HuggingFace Inference client updated with OAuth token")
                except Exception as e:
                    logger.error(f"❌ Failed to update HF client: {e}")
            return f"✅ Signed in as: **{profile.name}**"

        # Provider selection with auto-model loading
        def handle_provider_change(provider_id):
            """Return (model dropdown update, status message) for a provider."""
            if not provider_id:
                return gr.Dropdown(choices=[], value=None), "⚪ Select provider first"
            available_models = AppConfig.get_available_models_for_provider(provider_id)
            # (display name, model id) pairs for the dropdown.
            model_choices = [(AppConfig.AVAILABLE_MODELS[model]["name"], model) for model in available_models]
            # Auto-select 120b model if available, else the first model listed.
            default_model = "openai/gpt-oss-120b" if "openai/gpt-oss-120b" in available_models else (available_models[0] if available_models else None)
            # Get context info for status
            if default_model:
                model_info = AppConfig.AVAILABLE_MODELS.get(default_model, {})
                context_length = model_info.get("context_length", 128000)
                status_msg = f"✅ Provider selected, model auto-selected ({context_length:,} token context)"
            else:
                status_msg = "✅ Provider selected, please select a model"
            return (
                gr.Dropdown(choices=model_choices, value=default_model, label="🤖 Model"),
                status_msg
            )

        # Model selection
        def handle_model_change(provider_id, model_id):
            """Apply the provider/model pair to the MCP client; return status text."""
            if not provider_id or not model_id:
                return "⚪ Select both provider and model"
            self.mcp_client.set_model_and_provider(provider_id, model_id)
            # Get model info for the status line.
            model_info = AppConfig.AVAILABLE_MODELS.get(model_id, {})
            context_length = model_info.get("context_length", 128000)
            active_params = model_info.get("active_params", "N/A")
            # hf_client is only set after a successful OAuth sign-in.
            if self.mcp_client.hf_client:
                return f"✅ Ready! Using {active_params} active params, {context_length:,} token context"
            else:
                return "❌ Please login first"

        # Chat handlers
        def submit_message(message, history):
            """Process a multimodal submission; returns (history, input update)."""
            # Only process when there is text or at least one attached file.
            if message and (message.get("text", "").strip() or message.get("files", [])):
                # Chatbot(type="messages") yields plain dicts; normalize them to
                # ChatMessage objects before handing off to the chat handler.
                converted_history = []
                for msg in history:
                    if isinstance(msg, dict):
                        converted_history.append(ChatMessage(
                            role=msg.get('role', 'assistant'),
                            content=msg.get('content', ''),
                            metadata=msg.get('metadata', None)
                        ))
                    else:
                        converted_history.append(msg)
                new_history, cleared_input = self.chat_handler.process_multimodal_message(message, converted_history)
                return new_history, cleared_input
            # Empty submission: keep history; disable input (re-enabled by .then).
            return history, gr.MultimodalTextbox(value=None, interactive=False)

        def enable_input():
            # Re-enables the input box after a submit cycle completes.
            return gr.MultimodalTextbox(interactive=True)

        def show_add_server_fields():
            """Open the add-server form: reveal inputs, swap button states."""
            return [
                gr.Dropdown(visible=True),     # mcp_dropdown
                gr.Textbox(visible=True),      # server_name
                gr.Button(interactive=False),  # add_server_btn - disable it
                gr.Button(visible=True)        # save_server_btn - show it
            ]

        def hide_add_server_fields():
            """Close the add-server form and clear its inputs."""
            return [
                gr.Dropdown(visible=False, value=None),  # mcp_dropdown
                gr.Textbox(visible=False, value=""),     # server_name
                gr.Button(interactive=True),             # add_server_btn - re-enable it
                gr.Button(visible=False)                 # save_server_btn - hide it
            ]

        def handle_add_server(server_title, selected_space):
            """Validate the form, add the server, and refresh the checkbox list.

            Always returns 6 updates: dropdown, title box, add button, save
            button, server checkboxes, and the status markdown.
            """
            if not server_title or not selected_space:
                # Validation failure: close the form, keep current server state.
                return [
                    gr.Dropdown(visible=False, value=None),
                    gr.Textbox(visible=False, value=""),
                    gr.Button(interactive=True),  # Re-enable add button
                    gr.Button(visible=False),     # Hide save button
                    gr.CheckboxGroup(choices=list(self.mcp_client.servers.keys()),
                                     value=[name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]),
                    gr.Markdown("❌ Please provide both server title and space selection", visible=True)
                ]
            try:
                status_msg, _ = self.server_manager.add_custom_server(server_title.strip(), selected_space)
                # Update checkboxes with all servers
                server_choices = list(self.mcp_client.servers.keys())
                enabled_servers = [name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]
                # Check if we have many servers and show a warning
                warning_msg = ""
                if len(enabled_servers) > 6:
                    warning_msg = "\n\n⚠️ **Note:** You have more than 6 servers enabled. Consider disabling some for better performance."
                return [
                    gr.Dropdown(visible=False, value=None),
                    gr.Textbox(visible=False, value=""),
                    gr.Button(interactive=True),  # Re-enable add button
                    gr.Button(visible=False),     # Hide save button
                    gr.CheckboxGroup(choices=server_choices, value=enabled_servers),
                    gr.Markdown(status_msg + warning_msg, visible=True)
                ]
            except Exception as e:
                logger.error(f"Error adding server: {e}")
                # Failure path mirrors the validation path but reports the error.
                return [
                    gr.Dropdown(visible=False, value=None),
                    gr.Textbox(visible=False, value=""),
                    gr.Button(interactive=True),  # Re-enable add button
                    gr.Button(visible=False),     # Hide save button
                    gr.CheckboxGroup(choices=list(self.mcp_client.servers.keys()),
                                     value=[name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]),
                    gr.Markdown(f"❌ Error: {str(e)}", visible=True)
                ]

        def handle_server_toggle(enabled_servers):
            """Handle enabling/disabling servers via checkboxes"""
            # Update enabled status for all servers (checked == enabled).
            for server_name in self.mcp_client.servers.keys():
                self.mcp_client.enable_server(server_name, server_name in enabled_servers)
            enabled_count = len(enabled_servers)
            # Provide feedback based on count
            if enabled_count == 0:
                message = "ℹ️ No servers enabled - chatbot will use native capabilities only"
            elif enabled_count <= 6:
                message = f"✅ {enabled_count} server{'s' if enabled_count != 1 else ''} enabled - optimal configuration"
            else:
                message = f"⚠️ {enabled_count} servers enabled - consider reducing to 3-6 for better performance"
            return gr.Markdown(message, visible=True)

        def handle_remove_all():
            """Remove all MCP servers"""
            count = self.mcp_client.remove_all_servers()
            return [
                gr.CheckboxGroup(choices=[], value=[]),
                gr.Markdown(f"✅ Removed all {count} servers", visible=True)
            ]

        # Load handler to initialize default mcp servers
        def initialize_defaults():
            """Initialize default servers and update UI on app load"""
            self._initialize_default_servers()
            # Return updated checkboxes with the default servers
            server_choices = list(self.mcp_client.servers.keys())
            enabled_servers = [name for name, enabled in self.mcp_client.enabled_servers.items() if enabled]
            return gr.CheckboxGroup(
                choices=server_choices,
                value=enabled_servers,
                label=f"Active Servers ({len(server_choices)} loaded)"
            )

        # Connect OAuth. No inputs: Gradio supplies the OAuth arguments from
        # the callback's type annotations.
        demo.load(
            fn=handle_oauth_profile,
            outputs=[self.login_status]
        )
        # Connect provider/model dropdowns with auto-selection on load
        demo.load(
            fn=lambda: handle_provider_change("cerebras"),
            outputs=[self.model_dropdown, self.api_status]
        )
        # Initialise default mcp server load
        demo.load(
            fn=initialize_defaults,
            outputs=[self.server_checkboxes]
        )
        self.provider_dropdown.change(
            handle_provider_change,
            inputs=[self.provider_dropdown],
            outputs=[self.model_dropdown, self.api_status]
        )
        self.model_dropdown.change(
            handle_model_change,
            inputs=[self.provider_dropdown, self.model_dropdown],
            outputs=[self.api_status]
        )
        # Connect chat: submit processes the message, then re-enable the input.
        chat_submit = self.chat_input.submit(
            submit_message,
            inputs=[self.chat_input, chatbot],
            outputs=[chatbot, self.chat_input]
        )
        chat_submit.then(enable_input, None, [self.chat_input])
        # Connect server management with proper button state handling
        self.add_server_btn.click(
            fn=show_add_server_fields,
            outputs=[self.mcp_dropdown, self.server_name, self.add_server_btn, self.save_server_btn]
        )
        # Connect save button
        self.save_server_btn.click(
            fn=handle_add_server,
            inputs=[self.server_name, self.mcp_dropdown],
            outputs=[self.mcp_dropdown, self.server_name, self.add_server_btn, self.save_server_btn, self.server_checkboxes, self.add_server_output]
        )
        self.server_checkboxes.change(
            handle_server_toggle,
            inputs=[self.server_checkboxes],
            outputs=[self.add_server_output]
        )
        self.remove_all_btn.click(
            handle_remove_all,
            outputs=[self.server_checkboxes, self.add_server_output]
        )