""" Configuration module for Universal MCP Client Enhanced with HuggingFace Inference Provider support """ import os from dataclasses import dataclass from typing import Optional, Dict, List import logging # Set up enhanced logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) @dataclass class MCPServerConfig: """Configuration for an MCP server connection""" name: str url: str description: str space_id: Optional[str] = None class AppConfig: """Application configuration settings""" # API Configuration ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY") HF_TOKEN = os.getenv("HF_TOKEN") # Model Configuration CLAUDE_MODEL = "claude-sonnet-4-20250514" MAX_TOKENS = 2048 # MCP Configuration MCP_BETA_VERSION = "mcp-client-2025-04-04" MCP_TIMEOUT_SECONDS = 20.0 # UI Configuration GRADIO_THEME = "citrus" DEBUG_MODE = True # File Support SUPPORTED_IMAGE_EXTENSIONS = ['.png', '.jpg', '.jpeg', '.gif', '.webp'] SUPPORTED_AUDIO_EXTENSIONS = ['.mp3', '.wav', '.ogg', '.m4a', '.flac'] SUPPORTED_VIDEO_EXTENSIONS = ['.mp4', '.avi', '.mov'] SUPPORTED_DOCUMENT_EXTENSIONS = ['.pdf', '.txt', '.docx'] # Inference Providers Configuration INFERENCE_PROVIDERS = { "sambanova": { "name": "SambaNova", "description": "Ultra-fast inference on optimized hardware", "supports_tools": True, "models": [ "meta-llama/Llama-3.3-70B-Instruct", "deepseek-ai/DeepSeek-R1-0528", "meta-llama/Llama-4-Maverick-17B-128E-Instruct", "intfloat/e5-mistral-7b-instruct" ] }, "together": { "name": "Together AI", "description": "High-performance inference for open models", "supports_tools": True, "models": [ "deepseek-ai/DeepSeek-V3-0324", "Qwen/Qwen2.5-72B-Instruct", "meta-llama/Llama-3.1-8B-Instruct", "black-forest-labs/FLUX.1-dev" ] }, "replicate": { "name": "Replicate", "description": "Run AI models in the cloud", "supports_tools": True, "models": [ "meta/llama-2-70b-chat", "mistralai/mixtral-8x7b-instruct-v0.1", "black-forest-labs/flux-schnell" ] }, "groq": { "name": "Groq", "description": "Ultra-low latency LPU inference", "supports_tools": True, "models": [ "meta-llama/Llama-4-Scout-17B-16E-Instruct", "llama-3.1-70b-versatile", "mixtral-8x7b-32768" ] }, "fal-ai": { "name": "fal.ai", "description": "Fast AI model inference", "supports_tools": True, "models": [ "meta-llama/Llama-3.1-8B-Instruct", "black-forest-labs/flux-pro" ] }, "fireworks-ai": { "name": "Fireworks AI", "description": "Production-ready inference platform", "supports_tools": True, "models": [ "accounts/fireworks/models/llama-v3p1-70b-instruct", "accounts/fireworks/models/mixtral-8x7b-instruct" ] }, "cohere": { "name": "Cohere", "description": "Enterprise-grade language AI", "supports_tools": True, "models": [ "command-r-plus", "command-r", "command" ] }, "hf-inference": { "name": "HF Inference", "description": "Hugging Face serverless inference", "supports_tools": True, "models": [ "meta-llama/Llama-3.2-11B-Vision-Instruct", "microsoft/DialoGPT-medium", "intfloat/multilingual-e5-large" ] } } @classmethod def get_all_media_extensions(cls): """Get all supported media file extensions""" return (cls.SUPPORTED_IMAGE_EXTENSIONS + cls.SUPPORTED_AUDIO_EXTENSIONS + cls.SUPPORTED_VIDEO_EXTENSIONS) @classmethod def is_image_file(cls, file_path: str) -> bool: """Check if file is an image""" return any(ext in file_path.lower() for ext in cls.SUPPORTED_IMAGE_EXTENSIONS) @classmethod def is_audio_file(cls, file_path: str) -> bool: """Check if file is an audio file""" return any(ext in 
    @classmethod
    def is_video_file(cls, file_path: str) -> bool:
        """Check if file is a video file"""
        return file_path.lower().endswith(tuple(cls.SUPPORTED_VIDEO_EXTENSIONS))

    @classmethod
    def is_media_file(cls, file_path: str) -> bool:
        """Check if file is any supported media type"""
        return file_path.lower().endswith(tuple(cls.get_all_media_extensions()))

    @classmethod
    def get_provider_models(cls, provider: str) -> List[str]:
        """Get available models for a specific provider"""
        return cls.INFERENCE_PROVIDERS.get(provider, {}).get("models", [])

    @classmethod
    def get_all_providers(cls) -> Dict[str, Dict]:
        """Get all available inference providers"""
        return cls.INFERENCE_PROVIDERS


# Check for optional dependencies
try:
    import httpx
    HTTPX_AVAILABLE = True
except ImportError:
    HTTPX_AVAILABLE = False
    logger.warning("httpx not available - file upload functionality limited")

try:
    from huggingface_hub import InferenceClient
    HF_INFERENCE_AVAILABLE = True
except ImportError:
    HF_INFERENCE_AVAILABLE = False
    logger.warning("huggingface_hub not available - inference provider functionality limited")

# CSS Configuration
CUSTOM_CSS = """
/* Hide Gradio footer */
footer {
    display: none !important;
}

/* Make chatbot expand to fill available space */
.gradio-container {
    height: 100vh !important;
}

/* Ensure proper flex layout */
.main-content {
    display: flex;
    flex-direction: column;
    height: 100%;
}

/* Input area stays at bottom with minimal padding */
.input-area {
    margin-top: auto;
    padding-top: 0.25rem !important;
    padding-bottom: 0 !important;
    margin-bottom: 0 !important;
}

/* Reduce padding around chatbot */
.chatbot {
    margin-bottom: 0 !important;
    padding-bottom: 0 !important;
}

/* Provider selection styling */
.provider-selection {
    border: 1px solid #e0e0e0;
    border-radius: 8px;
    padding: 10px;
    margin: 5px 0;
}

.anthropic-config {
    background-color: #f8f9fa;
    border-left: 4px solid #28a745;
}

.hf-config {
    background-color: #fff8e1;
    border-left: 4px solid #ff9800;
}
"""
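
# Minimal smoke test (an illustrative sketch, run only when this module is
# executed directly). Constructing InferenceClient with `provider=` and
# `api_key=` follows the documented huggingface_hub API (0.28+); the choice
# of "sambanova" below is an arbitrary example from INFERENCE_PROVIDERS,
# not a recommendation.
if __name__ == "__main__":
    _example_config_usage()
    for key, meta in AppConfig.get_all_providers().items():
        logger.info("%s (%s): %d model(s)", meta["name"], key,
                    len(AppConfig.get_provider_models(key)))
    if HF_INFERENCE_AVAILABLE and AppConfig.HF_TOKEN:
        client = InferenceClient(provider="sambanova", api_key=AppConfig.HF_TOKEN)
        logger.info("InferenceClient ready: %r", client)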