Commit b652d46 by jbilcke-hf (HF Staff) · Parent: 31fe0d9

Fixing a bug with fallback and YAML parsing
api_core.py CHANGED
@@ -204,7 +204,6 @@ class ChatRoom:
 
 class VideoGenerationAPI:
     def __init__(self):
-        self.inference_client = InferenceClient(token=HF_TOKEN)
         self.hf_api = HfApi(token=HF_TOKEN)
         self.endpoint_manager = EndpointManager()
         self.active_requests: Dict[str, asyncio.Future] = {}
@@ -226,76 +225,74 @@ class VideoGenerationAPI:
         3. Server's HF token (only for built-in provider)
         4. Raise exception if no valid key is available
         """
+
         if not llm_config:
-            return self.inference_client
+            if HF_TOKEN:
+                return InferenceClient(
+                    model=TEXT_MODEL,
+                    token=HF_TOKEN
+                )
+            else:
+                raise ValueError("Built-in provider is not available. Server HF_TOKEN is not configured.")
 
         provider = llm_config.get('provider', '').lower()
+        logger.info(f"provider = {provider}")
+
+        # If no provider or model specified, use default
+        if not provider or provider == 'built-in':
+            if HF_TOKEN:
+                return InferenceClient(
+                    model=TEXT_MODEL,
+                    token=HF_TOKEN
+                )
+            else:
+                raise ValueError("Built-in provider is not available. Server HF_TOKEN is not configured.")
+
         model = llm_config.get('model', '')
-        api_key = llm_config.get('api_key', '')  # Provider-specific API key
-        hf_token = llm_config.get('hf_token', '')  # User's HF token
+        user_provider_api_key = llm_config.get('api_key', '')  # Provider-specific API key
+        user_hf_token = llm_config.get('hf_token', '')  # User's HF token
 
+        #logger.info(f"model = {model}")
+        #logger.info(f"user_provider_api_key = {user_provider_api_key}")
+        #logger.info(f"user_hf_token = {user_hf_token}")
+
         # If no provider or model specified, use default
-        if not provider or not model:
-            return self.inference_client
-
-        try:
-            # Map frontend provider names to HF InferenceClient provider names
-            provider_mapping = {
-                'openai': 'openai',
-                'anthropic': 'anthropic',
-                'google': 'google',
-                'cohere': 'cohere',
-                'together': 'together',
-                'huggingface': None,  # Use HF directly without provider
-                'builtin': None  # Use server's default model
-            }
-
-            hf_provider = provider_mapping.get(provider)
-
-            # Handle built-in provider first (always uses server's HF token and default model)
-            if provider == 'builtin':
-                if HF_TOKEN:
-                    # Use server's default model from HF_TEXT_MODEL
-                    return InferenceClient(
-                        model=TEXT_MODEL if TEXT_MODEL else model,
-                        token=HF_TOKEN
-                    )
-                else:
-                    raise ValueError("Built-in provider is not available. Server HF_TOKEN is not configured.")
-
-            # Priority 1: Use provider-specific API key if available
-            if api_key and hf_provider:
+        if not provider or provider == 'built-in':
+            if HF_TOKEN:
                 return InferenceClient(
-                    provider=hf_provider,
-                    model=model,
-                    api_key=api_key
+                    model=TEXT_MODEL,
+                    token=HF_TOKEN
                 )
-            elif api_key and provider == 'huggingface':
-                # For HuggingFace provider with an API key (treat it as HF token)
+            else:
+                raise ValueError("Built-in provider is not available. Server HF_TOKEN is not configured.")
+
+        try:
+            # Case 1: Use a provider with a provider-specific API key if available
+            # This mode is currently hidden in the Flutter UI (we don't ask for provider-specific keys yet)
+            # but it is implemented here so that we don't forget it later
+            if user_provider_api_key:
                 return InferenceClient(
+                    provider=hf_provider,
                     model=model,
-                    token=api_key
+                    api_key=user_provider_api_key
                 )
-
-            # Priority 2: Use user's HF token if available
-            if hf_token:
+
+            # Case 2: Use a provider with user's HF token if available
+            elif user_hf_token:
                 return InferenceClient(
+                    provider=hf_provider,
                     model=model,
-                    token=hf_token
+                    token=user_hf_token
                 )
-
-            # No valid API key available
-            # Note: Server's HF token is NEVER used for inference providers
-            if provider == 'huggingface':
-                raise ValueError("No API key provided. Please provide your Hugging Face API key.")
+            #
             else:
-                raise ValueError(f"No API key provided for {provider}. Please provide either a {provider} API key or your Hugging Face API key.")
-
+                raise ValueError(f"No API key provided for provider '{provider}'. Please provide either a valid {provider} API key or your Hugging Face API key.")
+
         except ValueError:
             # Re-raise ValueError for missing API keys
             raise
         except Exception as e:
-            logger.error(f"Error creating InferenceClient with config {llm_config}: {e}")
+            logger.error(f"Error creating InferenceClient for provider '{provider}' and model '{model}': {e}")
             # Re-raise all other exceptions
             raise
@@ -543,9 +540,18 @@ title: \""""
             raw_yaml_str = raw_yaml_str.strip()
         else:
             raw_yaml_str = re.sub(r'^\s*\.\s*\n', '', f"title: \"{raw_yaml_str}")
-
+
+        # Check if it already has a proper YAML structure
+        if not raw_yaml_str.startswith(('title:', 'title :')):
+            # Only wrap with title if it doesn't already have one
+            # Also escape any quotes in the string to prevent YAML parsing issues
+            escaped_yaml = raw_yaml_str.replace('"', '\\"')
+            raw_yaml_str = f'title: "{escaped_yaml}"'
+        else:
+            # If it already has title:, just clean it up
+            raw_yaml_str = re.sub(r'^\s*\.\s*\n', '', raw_yaml_str)
+
         sanitized_yaml = sanitize_yaml_response(raw_yaml_str)
-        #logger.info(f"search_video(): sanitized_yaml = {sanitized_yaml}")
 
         try:
             result = yaml.safe_load(sanitized_yaml)
@@ -598,11 +604,17 @@ title: \""""
             current_attempt += 1
             temperature = random.uniform(0.68, 0.72)  # Try with different random temperature on next attempt
 
+
+        # List of video types to randomly choose from
+        video_types = ["documentary", "movie screencap, movie scene", "POV, gopro footage", "music video", "videogame gameplay", "creepy found footage"]
+
+        video_type = random.choice(video_types)
+
         # If all attempts failed, return a simple result with title only
         return {
             'id': str(uuid.uuid4()),
-            'title': f"Video about {query}",
-            'description': f"Video about {query}",
+            'title': f"{query} ({video_type})",
+            'description': f"{video_type}, {query}, engaging, detailed, dynamic, high quality, 4K, intricate details",
             'thumbnailUrl': '',
             'videoUrl': '',
             'isLatent': True,
@@ -888,8 +900,7 @@ Your caption:"""
         seed = options.get('seed', generate_seed())
         request_id = str(uuid.uuid4())[:8]  # Generate a short ID for logging
 
-        logger.info(f"[{request_id}] Starting video thumbnail generation for video_id: {video_id}")
-        logger.info(f"[{request_id}] Title: '{title}', User role: {user_role}")
+        logger.info(f"[{request_id}] Starting video thumbnail generation for video_id: {video_id}, Title: '{title}', User role: {user_role}")
 
         # Create a more concise prompt for the thumbnail
         clip_caption = f"{video_prompt_prefix} - {title.strip()}"
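
Note on the fallback path: the docstring in the diff describes the key-resolution order (1. provider-specific API key, 2. user's HF token, 3. server's HF token for the built-in provider only, 4. raise). A rough standalone sketch of that order follows. It is illustrative only, not code from this commit: resolve_client, server_token and default_model are hypothetical names, and the provider string is passed straight to InferenceClient where the diff uses an hf_provider value.

from huggingface_hub import InferenceClient

def resolve_client(llm_config, server_token, default_model):
    # Hypothetical sketch of the priority order from the docstring above.
    provider = (llm_config or {}).get('provider', '').lower()
    if not llm_config or not provider or provider == 'built-in':
        # Built-in provider: the only case where the server's HF token is used
        if server_token:
            return InferenceClient(model=default_model, token=server_token)
        raise ValueError("Built-in provider is not available. Server HF_TOKEN is not configured.")
    model = llm_config.get('model', '')
    if llm_config.get('api_key'):  # 1. provider-specific API key
        return InferenceClient(provider=provider, model=model, api_key=llm_config['api_key'])
    if llm_config.get('hf_token'):  # 2. user's own HF token
        return InferenceClient(provider=provider, model=model, token=llm_config['hf_token'])
    raise ValueError(f"No API key provided for provider '{provider}'.")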
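
The YAML fix can be checked in isolation. Below is a minimal sketch assuming only pyyaml, with the wrapping and quote-escaping logic mirrored from the diff (wrap_as_title is an illustrative name, not a function in the codebase):

import yaml

def wrap_as_title(raw: str) -> str:
    # Mirrors the diff's logic: wrap bare LLM output in a title: key,
    # escaping embedded double quotes so the YAML scalar stays terminated.
    if not raw.startswith(('title:', 'title :')):
        escaped = raw.replace('"', '\\"')
        return f'title: "{escaped}"'
    return raw

assert yaml.safe_load(wrap_as_title('A "quoted" phrase')) == {'title': 'A "quoted" phrase'}
assert yaml.safe_load(wrap_as_title('title: "already structured"')) == {'title': 'already structured'}

Note that only double quotes are escaped; a raw string containing backslashes could still produce invalid YAML.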
build/web/flutter_bootstrap.js CHANGED
@@ -38,6 +38,6 @@ _flutter.buildConfig = {"engineRevision":"1c9c20e7c3dd48c66f400a24d48ea806b4ab31
 
 _flutter.loader.load({
   serviceWorkerSettings: {
-    serviceWorkerVersion: "2693113717"
+    serviceWorkerVersion: "2739260384"
   }
 });
build/web/flutter_service_worker.js CHANGED
@@ -3,7 +3,7 @@ const MANIFEST = 'flutter-app-manifest';
 const TEMP = 'flutter-temp-cache';
 const CACHE_NAME = 'flutter-app-cache';
 
-const RESOURCES = {"flutter_bootstrap.js": "4e3f72d0d30bbd04184daf8bbd46ea55",
+const RESOURCES = {"flutter_bootstrap.js": "9b7a4a65d904e18b088b806df19f480c",
 "version.json": "68350cac7987de2728345c72918dd067",
 "tikslop.png": "570e1db759046e2d224fef729983634e",
 "index.html": "3a7029b3672560e7938aab6fa4d30a46",
build/web/index.html CHANGED
@@ -156,7 +156,7 @@
     </script>
 
     <!-- Add version parameter for cache busting -->
-    <script src="flutter_bootstrap.js?v=1753357418" async></script>
+    <script src="flutter_bootstrap.js?v=1753360434" async></script>
 
     <!-- Add cache busting script -->
     <script>