linoyts HF Staff commited on
Commit
3749736
·
verified ·
1 Parent(s): 864b980

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -4
app.py CHANGED
@@ -357,7 +357,8 @@ DIRECTION_OPTIONS = {
357
  def infer(
358
  image,
359
  prompt,
360
- illumination_dropdown, direction_dropdown,
 
361
  seed=42,
362
  randomize_seed=True,
363
  true_guidance_scale=1.0,
@@ -366,7 +367,41 @@ def infer(
366
  progress=gr.Progress(track_tqdm=True),
367
  ):
368
  """
369
- Generates an edited image using the Qwen-Image-Edit pipeline with Lightning acceleration.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
370
  """
371
  # Hardcode the negative prompt as in the original
372
  negative_prompt = " "
@@ -412,7 +447,7 @@ def infer(
412
  num_images_per_prompt=1 # Always generate only 1 image
413
  ).images
414
 
415
- # Return the first (and only) image
416
  return [image,images[0]], seed
417
 
418
  except Exception as e:
@@ -579,4 +614,4 @@ with gr.Blocks(css=css) as demo:
579
  )
580
 
581
  if __name__ == "__main__":
582
- demo.launch()
 
357
  def infer(
358
  image,
359
  prompt,
360
+ illumination_dropdown="custom",
361
+ direction_dropdown="auto",
362
  seed=42,
363
  randomize_seed=True,
364
  true_guidance_scale=1.0,
 
367
  progress=gr.Progress(track_tqdm=True),
368
  ):
369
  """
370
+ Generates an edited image using the Qwen-Image-Edit pipeline with Lightning LoRA.
371
+
372
+ This function takes an input image and generates an edited image based on the provided
373
+ prompt and parameters. It uses the Qwen-Image-Edit pipeline with Lightning LoRA
374
+ for fast generation in 4-8 steps.
375
+
376
+ Args:
377
+ image (PIL.Image): The input image to edit.
378
+ prompt (str): Text prompt describing the desired lighting conditions.
379
+ illumination_dropdown (str, optional): Lighting style.
380
+ Defaults to 'custom' - lighting style will be determined by the prompt.
381
+ direction_dropdown (str, optional): Lighting direction, can be one of [auto, left side, right side, top, top left, top right, bottom, front, back].
382
+ Defaults to 'auto', which leaves the direction unspecified.
383
+ seed (int, optional): Random seed for reproducible results. Defaults to 42.
384
+ Range: 0 to MAX_SEED (2147483647).
385
+ randomize_seed (bool, optional): Whether to use a random seed instead of the provided seed.
386
+ Defaults to True.
387
+ true_guidance_scale (float, optional): Controls adherence to the prompt. Higher values = more adherence.
388
+ Defaults to 1.0. Range: 0.0-20.0.
389
+ num_inference_steps (int, optional): Number of inference steps.
390
+ Defaults to 8. Range: 1-28.
391
+ rewrite_prompt (bool, optional): Whether or not to polish the edit prompt with Qwen/Qwen2.5-VL-72B-Instruct using HF inference.
392
+ Defaults to True.
393
+ progress (gr.Progress, optional): Gradio progress tracker. Defaults to gr.Progress(track_tqdm=True).
394
+
395
+ Returns:
396
+ tuple: A tuple containing:
397
+ - List[PIL.Image.Image]: a 2-element list containing the input image and the edited image
398
+ - int: The seed used for generation (useful when randomize_seed=True)
399
+
400
+ Raises:
401
+ gr.Error: If image is None (no image uploaded).
402
+
403
+ Note:
404
+ - The function uses GPU acceleration via the @spaces.GPU decorator
405
  """
406
  # Hardcode the negative prompt as in the original
407
  negative_prompt = " "
 
447
  num_images_per_prompt=1 # Always generate only 1 image
448
  ).images
449
 
450
+
451
  return [image,images[0]], seed
452
 
453
  except Exception as e:
 
614
  )
615
 
616
  if __name__ == "__main__":
617
+ demo.launch(mcp_server=True)