""" | |
UI Components for Universal MCP Client - Enhanced with Inference Provider Support | |
""" | |
import gradio as gr
from gradio import ChatMessage
from typing import Tuple, List, Dict, Any

from config import AppConfig, CUSTOM_CSS
from chat_handler import ChatHandler
from server_manager import ServerManager
from mcp_client import UniversalMCPClient
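
# Module overview: UniversalMCPClient is the shared backend client,
# ChatHandler turns incoming multimodal messages into ChatMessage history
# entries, and ServerManager adds MCP servers and reports their status;
# UIComponents below wires all three into a single Gradio Blocks app.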


class UIComponents:
    """Manages Gradio UI components and event handlers with Inference Provider support"""

    def __init__(self, mcp_client: UniversalMCPClient):
        self.mcp_client = mcp_client
        self.chat_handler = ChatHandler(mcp_client)
        self.server_manager = ServerManager(mcp_client)
        # State for the current LLM backend
        self.current_backend = "anthropic"  # "anthropic" or "hf_inference"
        self.current_provider = None
        self.current_model = None

    def create_interface(self) -> gr.Blocks:
        """Create the main Gradio interface with provider selection"""
        with gr.Blocks(
            title="Universal MCP Client",
            theme=getattr(gr.themes, AppConfig.GRADIO_THEME.title())(),
            fill_height=True,
            css=CUSTOM_CSS
        ) as demo:
            # Create the sidebar
            self._create_sidebar()
            # Create the main chat area
            chatbot = self._create_main_chat_area()
            # Set up event handlers
            self._setup_event_handlers(chatbot)
        return demo

    def _create_sidebar(self):
        """Create the sidebar with LLM provider selection and server management"""
        with gr.Sidebar(elem_id="main-sidebar"):
            gr.Markdown("# Gradio.chat.app")

            # LLM backend selection section
            self._create_llm_backend_selection()

            # Collapsible information section
            with gr.Accordion("📚 Guide & Info", open=False):
                gr.Markdown("""
                ## 🎯 How To Use
                - **Choose LLM Backend**: Select between Anthropic Claude and HuggingFace Inference Providers
                - **Chat with LLM**: Interact with your selected model
                - **MCP Integration**: Add MCP servers for enhanced capabilities
                - **Subscribe**: PRO subscribers get higher usage limits on various services

                ## 💭 New UI Features
                - **Provider Selection**: Choose from multiple inference providers
                - **Tool Usage**: See tool calls in collapsible accordions with 🔧 icons
                - **Results**: View tool results in nested thoughts with 📋 icons
                - **Media Output**: Images, audio, and video display separately from text
                - **Real-time Status**: Watch tools execute with pending/done status indicators
                """)

            # Current backend status
            self.backend_status = gr.Markdown("**Current Backend**: Not configured")

            # Server management
            self._create_server_management_section()

    def _create_llm_backend_selection(self):
        """Create the LLM backend selection interface"""
        gr.Markdown("## 🤖 LLM Backend Selection")

        # Radio buttons for backend selection
        self.backend_radio = gr.Radio(
            choices=[
                ("Anthropic Claude Sonnet 4", "anthropic"),
                ("HuggingFace Inference Providers", "hf_inference")
            ],
            value="anthropic",
            label="Choose LLM Backend",
            info="Select your preferred language model backend"
        )
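        # Note: gr.Radio accepts (display label, value) tuples, so event
        # handlers receive the short keys "anthropic" / "hf_inference" rather
        # than the human-readable labels shown in the UI.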

        # Anthropic configuration section
        with gr.Column(visible=True, elem_classes="provider-selection anthropic-config") as self.anthropic_config:
            gr.Markdown("### 🔹 Anthropic Claude Configuration")
            # Check whether the API key is available
            if AppConfig.ANTHROPIC_API_KEY:
                self.anthropic_status = gr.Markdown("✅ **Status**: API key configured")
            else:
                self.anthropic_status = gr.Markdown("""
                ❌ **Status**: API key not found

                **Setup Instructions**:
                1. Go to Space Settings → Secrets
                2. Add `ANTHROPIC_API_KEY` with your Anthropic API key
                3. Restart the space
                """)

        # HuggingFace Inference Provider configuration section
        with gr.Column(visible=False, elem_classes="provider-selection hf-config") as self.hf_config:
            gr.Markdown("### 🔸 HuggingFace Inference Provider Configuration")
            # Check whether the HF token is available
            if AppConfig.HF_TOKEN:
                self.hf_status = gr.Markdown("✅ **Status**: HF token configured")
            else:
                self.hf_status = gr.Markdown("""
                ❌ **Status**: HF token not found

                **Setup Instructions**:
                1. Go to Space Settings → Secrets
                2. Add `HF_TOKEN` with your HuggingFace token
                3. Restart the space
                """)

            # Provider dropdown
            provider_choices = list(AppConfig.INFERENCE_PROVIDERS.keys())
            self.provider_dropdown = gr.Dropdown(
                choices=provider_choices,
                label="🚀 Select Inference Provider",
                value=provider_choices[0] if provider_choices else None,
                info="Choose your preferred inference provider"
            )

            # Model dropdown (initially empty; populated based on the selected provider)
            self.model_dropdown = gr.Dropdown(
                choices=[],
                label="🤖 Select Model",
                value=None,
                info="Choose the specific model to use"
            )

            # Configure button
            self.configure_btn = gr.Button(
                "Configure Inference Provider",
                variant="primary",
                size="sm"
            )
            self.provider_config_status = gr.Textbox(
                label="Configuration Status",
                interactive=False,
                visible=False
            )

    def _create_server_management_section(self):
        """Create the server management section in the sidebar"""
        gr.Markdown("## 🔧 MCP Server Management")

        # Server status
        self.server_count_display = gr.Markdown(f"**Connected Servers**: {len(self.mcp_client.servers)}")
        if self.mcp_client.servers:
            server_list = "\n".join([f"• **{name}**" for name in self.mcp_client.servers.keys()])
            self.server_list_display = gr.Markdown(server_list)
        else:
            self.server_list_display = gr.Markdown("*No servers connected*\n\nAdd servers below.")

        with gr.Accordion("⚙️ Manage MCP Servers", open=False):
            gr.Markdown("**Add MCP Servers**")

            # Get the MCP spaces count for the dropdown label
            from mcp_spaces_finder import _finder
            spaces = _finder.get_mcp_spaces()
            spaces_count = len(spaces)

            # MCP spaces dropdown
            self.mcp_dropdown = gr.Dropdown(
                choices=spaces,
                label=f"🤖 Select from {spaces_count} Available MCP Servers",
                value=None,
                info="Choose a HuggingFace space that provides MCP server functionality",
                allow_custom_value=True
            )

            # Server title input
            self.server_name = gr.Textbox(
                label="Give the Server a Title",
                placeholder="For example, Text to Image Generator"
            )
            self.add_server_btn = gr.Button("Add Server", variant="primary")
            self.add_server_output = gr.Textbox(label="Status", interactive=False, container=False)
            self.add_server_details = gr.HTML(label="Details")

            # Refresh button for MCP spaces
            self.refresh_spaces_btn = gr.Button("🔄 Refresh Available Spaces", variant="secondary", size="sm")

            self.status_btn = gr.Button("Refresh Status", variant="secondary")
            self.status_count = gr.Markdown("**Total MCP Servers**: 0")
            self.status_output = gr.HTML()

    def _create_main_chat_area(self) -> gr.Chatbot:
        """Create the main chat area with ChatMessage support"""
        with gr.Column(elem_classes="main-content"):
            # The chatbot takes most of the space and is configured for ChatMessage
            chatbot = gr.Chatbot(
                label="Universal MCP-Powered Multimodal Chatbot",
                show_label=False,
                type="messages",  # Essential for ChatMessage support
                scale=1,  # Expand to fill the available space
                show_copy_button=True,
                avatar_images=None,
                # Initial welcome message in proper ChatMessage format
                value=[
                    ChatMessage(
                        role="assistant",
                        content="Welcome! I'm your MCP-powered AI assistant. Please configure your LLM backend in the sidebar to get started. I can help you with various tasks using connected MCP servers!"
                    )
                ]
            )
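            # Note: with type="messages", a ChatMessage whose metadata includes
            # a "title" key is rendered as a collapsible accordion - this is
            # how the 🔧 tool-call and 📋 result entries described in the
            # sidebar guide appear in the chat.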

            # Input area at the bottom - fixed size
            with gr.Column(scale=0, elem_classes="input-area"):
                self.chat_input = gr.MultimodalTextbox(
                    interactive=True,
                    file_count="multiple",
                    placeholder="Enter message or upload files (images, audio, video, documents)...",
                    show_label=False,
                    sources=["upload", "microphone"],
                    file_types=None  # Accept all file types
                )

        return chatbot

    def _setup_event_handlers(self, chatbot: gr.Chatbot):
        """Set up all event handlers for the interface"""

        # Backend selection event handlers
        def handle_backend_change(backend_choice):
            """Handle LLM backend selection change"""
            self.current_backend = backend_choice

            # Update the visibility of the configuration sections
            anthropic_visible = (backend_choice == "anthropic")
            hf_visible = (backend_choice == "hf_inference")

            # Update the backend status
            if backend_choice == "anthropic":
                if AppConfig.ANTHROPIC_API_KEY:
                    status = "**Current Backend**: Anthropic Claude Sonnet 4 ✅"
                else:
                    status = "**Current Backend**: Anthropic Claude (❌ API key needed)"
            else:
                if AppConfig.HF_TOKEN:
                    status = "**Current Backend**: HF Inference Providers (⚙️ Configure provider)"
                else:
                    status = "**Current Backend**: HF Inference Providers (❌ HF token needed)"

            return (
                gr.update(visible=anthropic_visible),  # anthropic_config
                gr.update(visible=hf_visible),         # hf_config
                status                                 # backend_status
            )

        def handle_provider_change(provider):
            """Handle inference provider change"""
            if not provider:
                return gr.update(choices=[], value=None)
            models = AppConfig.get_provider_models(provider)
            return gr.update(choices=models, value=models[0] if models else None)
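        # AppConfig.get_provider_models is expected to map a provider key to a
        # list of model ids, e.g. (hypothetical values, for illustration only):
        #   AppConfig.get_provider_models("together") -> ["meta-llama/Llama-3.3-70B-Instruct"]
        # An empty list simply leaves the model dropdown blank rather than erroring.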

        def handle_provider_configuration(provider, model):
            """Handle inference provider configuration"""
            if not provider or not model:
                return gr.update(value="❌ Please select both a provider and a model", visible=True)
            if not AppConfig.HF_TOKEN:
                return gr.update(value="❌ HF_TOKEN not configured. Please add it to the space secrets.", visible=True)

            # Configure the MCP client with the selected provider
            success = self.mcp_client.configure_inference_provider(provider, model)
            if success:
                self.current_provider = provider
                self.current_model = model
                # Keep the chat handler pointed at the (re)configured client
                self.chat_handler.mcp_client = self.mcp_client
                return gr.update(value=f"✅ Configured {provider} with {model}", visible=True)
            return gr.update(value="❌ Failed to configure inference provider", visible=True)

        # Chat event handlers
        def submit_message(message, history):
            """Handle message submission with ChatMessage support"""
            # Check that the backend is properly configured
            if self.current_backend == "anthropic" and not AppConfig.ANTHROPIC_API_KEY:
                error_msg = "❌ Please configure the Anthropic API key in the space settings first."
                history.append(ChatMessage(role="assistant", content=error_msg))
                return history, gr.MultimodalTextbox(value=None, interactive=False)

            if self.current_backend == "hf_inference" and (not self.current_provider or not self.current_model):
                error_msg = "❌ Please configure a HuggingFace inference provider first."
                history.append(ChatMessage(role="assistant", content=error_msg))
                return history, gr.MultimodalTextbox(value=None, interactive=False)

            if message and (message.get("text", "").strip() or message.get("files", [])):
                # Convert any existing dict history entries to ChatMessage objects
                converted_history = []
                for msg in history:
                    if isinstance(msg, dict):
                        converted_history.append(ChatMessage(
                            role=msg.get('role', 'assistant'),
                            content=msg.get('content', ''),
                            metadata=msg.get('metadata', None)
                        ))
                    else:
                        # Already a ChatMessage
                        converted_history.append(msg)

                new_history, cleared_input = self.chat_handler.process_multimodal_message(message, converted_history)
                return new_history, cleared_input

            return history, gr.MultimodalTextbox(value=None, interactive=False)

        def enable_input():
            """Re-enable the input after processing"""
            return gr.MultimodalTextbox(interactive=True)

        # Connect backend selection events
        self.backend_radio.change(
            handle_backend_change,
            inputs=[self.backend_radio],
            outputs=[self.anthropic_config, self.hf_config, self.backend_status]
        )

        # Connect provider selection events
        self.provider_dropdown.change(
            handle_provider_change,
            inputs=[self.provider_dropdown],
            outputs=[self.model_dropdown]
        )

        # Connect provider configuration; refresh the sidebar status afterwards
        self.configure_btn.click(
            handle_provider_configuration,
            inputs=[self.provider_dropdown, self.model_dropdown],
            outputs=[self.provider_config_status]
        ).then(
            lambda: self._update_backend_status(),
            outputs=[self.backend_status]
        )

        # Set up the chat flow using the submit event
        chat_msg_enter = self.chat_input.submit(
            submit_message,
            inputs=[self.chat_input, chatbot],
            outputs=[chatbot, self.chat_input]
        )
        chat_msg_enter.then(enable_input, None, [self.chat_input])
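        # submit_message returns the textbox as non-interactive, and the .then()
        # step re-enables it only after processing finishes - a simple guard
        # against double submissions while a response is being generated.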

        # Server management event handlers
        def update_server_display():
            """Update the server status display in the sidebar"""
            server_count = len(self.mcp_client.servers)
            count_text = f"**Connected Servers**: {server_count}"
            if self.mcp_client.servers:
                server_list = "\n".join([f"• **{name}**" for name in self.mcp_client.servers.keys()])
                return count_text, server_list
            return count_text, "*No servers connected*\n\nAdd servers below."

        def handle_add_server(server_title, selected_space):
            """Handle adding a server and update the displays"""
            # Check that both a title and a space are provided
            if not server_title or not server_title.strip():
                return "❌ Please provide a server title", "", *update_server_display(), ""
            if not selected_space:
                return "❌ Please select a HuggingFace space from the dropdown", "", *update_server_display(), ""

            # Use the space selected in the dropdown
            status_msg, details_html = self.server_manager.add_custom_server(server_title.strip(), selected_space)

            # Update the sidebar server display and clear the title input
            count_text, list_text = update_server_display()
            return status_msg, details_html, count_text, list_text, ""

        def handle_refresh_status():
            """Handle the refresh status button"""
            count_text, accordions_html = self.server_manager.get_server_status()
            return count_text, accordions_html

        def handle_refresh_spaces():
            """Handle the refresh MCP spaces button"""
            from mcp_spaces_finder import refresh_mcp_spaces, _finder

            # Clear the cache and fetch a fresh list of spaces
            refresh_mcp_spaces()
            spaces = _finder.get_mcp_spaces()
            spaces_count = len(spaces)

            # Update the dropdown choices and label with the new count
            return gr.Dropdown(
                choices=spaces,
                value=None,
                label=f"🤖 Select from {spaces_count} Available MCP Servers",
                info="Choose a HuggingFace space that provides MCP server functionality"
            )
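        # Returning a fresh gr.Dropdown from the handler replaces the rendered
        # component's configuration (choices, label, value) in place, which is
        # how both the option list and the count in the label stay current.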

        # Connect server management events
        self.add_server_btn.click(
            handle_add_server,
            inputs=[self.server_name, self.mcp_dropdown],
            outputs=[
                self.add_server_output,
                self.add_server_details,
                self.server_count_display,
                self.server_list_display,
                self.server_name  # Clear the server title input
            ]
        )
        self.status_btn.click(
            handle_refresh_status,
            outputs=[self.status_count, self.status_output]
        )

        # Connect the refresh spaces button
        self.refresh_spaces_btn.click(
            handle_refresh_spaces,
            outputs=[self.mcp_dropdown]
        )

    def _update_backend_status(self):
        """Return backend status text based on the current configuration"""
        if self.current_backend == "anthropic":
            if AppConfig.ANTHROPIC_API_KEY:
                return "**Current Backend**: Anthropic Claude Sonnet 4 ✅"
            else:
                return "**Current Backend**: Anthropic Claude (❌ API key needed)"
        else:
            if self.current_provider and self.current_model:
                return f"**Current Backend**: {self.current_provider}/{self.current_model} ✅"
            else:
                return "**Current Backend**: HF Inference Providers (⚙️ Configure provider)"