ysharma HF Staff committed on
Commit
6fc3759
·
verified ·
1 Parent(s): beb0d58

Update config.py

Browse files
Files changed (1) hide show
  1. config.py +119 -5
config.py CHANGED
@@ -1,9 +1,10 @@
1
  """
2
  Configuration module for Universal MCP Client
 
3
  """
4
  import os
5
  from dataclasses import dataclass
6
- from typing import Optional
7
  import logging
8
 
9
  # Set up enhanced logging
@@ -23,6 +24,7 @@ class AppConfig:
23
 
24
  # API Configuration
25
  ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
 
26
 
27
  # Model Configuration
28
  CLAUDE_MODEL = "claude-sonnet-4-20250514"
@@ -42,6 +44,90 @@ class AppConfig:
42
  SUPPORTED_VIDEO_EXTENSIONS = ['.mp4', '.avi', '.mov']
43
  SUPPORTED_DOCUMENT_EXTENSIONS = ['.pdf', '.txt', '.docx']
44
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  @classmethod
46
  def get_all_media_extensions(cls):
47
  """Get all supported media file extensions"""
@@ -69,6 +155,16 @@ class AppConfig:
69
  """Check if file is any supported media type"""
70
  return any(ext in file_path.lower() for ext in cls.get_all_media_extensions())
71
 
 
 
 
 
 
 
 
 
 
 
72
  # Check for dependencies
73
  try:
74
  import httpx
@@ -77,25 +173,29 @@ except ImportError:
77
  HTTPX_AVAILABLE = False
78
  logger.warning("httpx not available - file upload functionality limited")
79
 
 
 
 
 
 
 
 
80
  # CSS Configuration
81
  CUSTOM_CSS = """
82
  /* Hide Gradio footer */
83
  footer {
84
  display: none !important;
85
  }
86
-
87
  /* Make chatbot expand to fill available space */
88
  .gradio-container {
89
  height: 100vh !important;
90
  }
91
-
92
  /* Ensure proper flex layout */
93
  .main-content {
94
  display: flex;
95
  flex-direction: column;
96
  height: 100%;
97
  }
98
-
99
  /* Input area stays at bottom with minimal padding */
100
  .input-area {
101
  margin-top: auto;
@@ -103,10 +203,24 @@ footer {
103
  padding-bottom: 0 !important;
104
  margin-bottom: 0 !important;
105
  }
106
-
107
  /* Reduce padding around chatbot */
108
  .chatbot {
109
  margin-bottom: 0 !important;
110
  padding-bottom: 0 !important;
111
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
  """
 
1
  """
2
  Configuration module for Universal MCP Client
3
+ Enhanced with HuggingFace Inference Provider support
4
  """
5
  import os
6
  from dataclasses import dataclass
7
+ from typing import Optional, Dict, List
8
  import logging
9
 
10
  # Set up enhanced logging
 
24
 
25
    # API Configuration
    # Anthropic API key read from the environment; None when unset — callers
    # must handle the missing-key case before creating a client.
    ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
    # Hugging Face access token for inference-provider calls; may be None.
    HF_TOKEN = os.getenv("HF_TOKEN")

    # Model Configuration
    # Default Claude model id used for chat completions.
    CLAUDE_MODEL = "claude-sonnet-4-20250514"
 
44
  SUPPORTED_VIDEO_EXTENSIONS = ['.mp4', '.avi', '.mov']
45
  SUPPORTED_DOCUMENT_EXTENSIONS = ['.pdf', '.txt', '.docx']
46
 
47
    # Inference Providers Configuration
    # Registry of Hugging Face inference providers selectable in the client.
    # Each entry maps a provider id to:
    #   "name"           - human-readable display name
    #   "description"    - short blurb shown in the UI
    #   "supports_tools" - whether tool/function calling is assumed available
    #   "models"         - curated model ids routable through that provider
    # NOTE(review): "supports_tools" is True for every provider here, including
    # image/embedding models (FLUX, e5) — verify actual per-provider/per-model
    # tool support before relying on this flag.
    INFERENCE_PROVIDERS = {
        "sambanova": {
            "name": "SambaNova",
            "description": "Ultra-fast inference on optimized hardware",
            "supports_tools": True,
            "models": [
                "meta-llama/Llama-3.3-70B-Instruct",
                "deepseek-ai/DeepSeek-R1-0528",
                "meta-llama/Llama-4-Maverick-17B-128E-Instruct",
                "intfloat/e5-mistral-7b-instruct"
            ]
        },
        "together": {
            "name": "Together AI",
            "description": "High-performance inference for open models",
            "supports_tools": True,
            "models": [
                "deepseek-ai/DeepSeek-V3-0324",
                "Qwen/Qwen2.5-72B-Instruct",
                "meta-llama/Llama-3.1-8B-Instruct",
                "black-forest-labs/FLUX.1-dev"
            ]
        },
        "replicate": {
            "name": "Replicate",
            "description": "Run AI models in the cloud",
            "supports_tools": True,
            "models": [
                "meta/llama-2-70b-chat",
                "mistralai/mixtral-8x7b-instruct-v0.1",
                "black-forest-labs/flux-schnell"
            ]
        },
        "groq": {
            "name": "Groq",
            "description": "Ultra-low latency LPU inference",
            "supports_tools": True,
            "models": [
                "meta-llama/Llama-4-Scout-17B-16E-Instruct",
                "llama-3.1-70b-versatile",
                "mixtral-8x7b-32768"
            ]
        },
        "fal-ai": {
            "name": "fal.ai",
            "description": "Fast AI model inference",
            "supports_tools": True,
            "models": [
                "meta-llama/Llama-3.1-8B-Instruct",
                "black-forest-labs/flux-pro"
            ]
        },
        "fireworks-ai": {
            "name": "Fireworks AI",
            "description": "Production-ready inference platform",
            "supports_tools": True,
            "models": [
                "accounts/fireworks/models/llama-v3p1-70b-instruct",
                "accounts/fireworks/models/mixtral-8x7b-instruct"
            ]
        },
        "cohere": {
            "name": "Cohere",
            "description": "Enterprise-grade language AI",
            "supports_tools": True,
            "models": [
                "command-r-plus",
                "command-r",
                "command"
            ]
        },
        "hf-inference": {
            "name": "HF Inference",
            "description": "Hugging Face serverless inference",
            "supports_tools": True,
            "models": [
                "meta-llama/Llama-3.2-11B-Vision-Instruct",
                "microsoft/DialoGPT-medium",
                "intfloat/multilingual-e5-large"
            ]
        }
    }
130
+
131
  @classmethod
132
  def get_all_media_extensions(cls):
133
  """Get all supported media file extensions"""
 
155
  """Check if file is any supported media type"""
156
  return any(ext in file_path.lower() for ext in cls.get_all_media_extensions())
157
 
158
+ @classmethod
159
+ def get_provider_models(cls, provider: str) -> List[str]:
160
+ """Get available models for a specific provider"""
161
+ return cls.INFERENCE_PROVIDERS.get(provider, {}).get("models", [])
162
+
163
+ @classmethod
164
+ def get_all_providers(cls) -> Dict[str, Dict]:
165
+ """Get all available inference providers"""
166
+ return cls.INFERENCE_PROVIDERS
167
+
168
  # Check for dependencies
169
  try:
170
  import httpx
 
173
  HTTPX_AVAILABLE = False
174
  logger.warning("httpx not available - file upload functionality limited")
175
 
176
# Optional-dependency probe: inference-provider features need huggingface_hub.
try:
    from huggingface_hub import InferenceClient  # noqa: F401
except ImportError:
    HF_INFERENCE_AVAILABLE = False
    logger.warning("huggingface_hub not available - inference provider functionality limited")
else:
    HF_INFERENCE_AVAILABLE = True
182
+
183
# CSS Configuration
# Injected into the Gradio app: hides the footer, makes the chat area fill
# the viewport, pins the input row to the bottom, and styles the
# provider-selection panels (green accent = Anthropic, orange = HF).
# NOTE(review): one line of the ".input-area" rule sits outside this diff
# view (between margin-top and padding-bottom) — confirm against the full file.
CUSTOM_CSS = """
/* Hide Gradio footer */
footer {
    display: none !important;
}
/* Make chatbot expand to fill available space */
.gradio-container {
    height: 100vh !important;
}
/* Ensure proper flex layout */
.main-content {
    display: flex;
    flex-direction: column;
    height: 100%;
}
/* Input area stays at bottom with minimal padding */
.input-area {
    margin-top: auto;
    padding-bottom: 0 !important;
    margin-bottom: 0 !important;
}
/* Reduce padding around chatbot */
.chatbot {
    margin-bottom: 0 !important;
    padding-bottom: 0 !important;
}
/* Provider selection styling */
.provider-selection {
    border: 1px solid #e0e0e0;
    border-radius: 8px;
    padding: 10px;
    margin: 5px 0;
}
.anthropic-config {
    background-color: #f8f9fa;
    border-left: 4px solid #28a745;
}
.hf-config {
    background-color: #fff8e1;
    border-left: 4px solid #ff9800;
}
"""