ovi054 committed
Commit 1556013 · verified · 1 Parent(s): 936bffc

Update app.py

Files changed (1)
  1. app.py +3 -22
app.py CHANGED
@@ -4,8 +4,6 @@ import random
 import spaces
 import torch
 from diffusers import DiffusionPipeline
-# from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
-# from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
 
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
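The two deleted imports were already commented out and belonged to the old FLUX live-preview setup. For orientation, a minimal sketch of how the remaining imports are typically wired up; the checkpoint id "Qwen/Qwen-Image" is an assumption inferred from the retitled UI below, not something visible in this diff:

```python
import torch
from diffusers import DiffusionPipeline

# Mirrors the dtype/device lines shown in this hunk.
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed model id (the Space's actual from_pretrained call is outside this diff).
pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=dtype)
pipe.to(device)
```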
@@ -29,26 +27,9 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
     generator = torch.Generator().manual_seed(seed)
 
 
-    # for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
-    #     prompt=prompt,
-    #     guidance_scale=guidance_scale,
-    #     num_inference_steps=num_inference_steps,
-    #     width=width,
-    #     height=height,
-    #     generator=generator,
-    #     output_type="pil",
-    #     good_vae=good_vae,
-    # ):
-    #     yield img, seed
-
-    # Handle LoRA loading
-    # Load LoRA weights and prepare joint_attention_kwargs
     if lora_id and lora_id.strip() != "":
         pipe.unload_lora_weights()
         pipe.load_lora_weights(lora_id.strip())
-    # joint_attention_kwargs = {"scale": lora_scale}
-    # else:
-    #     joint_attention_kwargs = None
 
     try:
         image = pipe(
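Every line deleted here was already commented out: the FLUX live-preview iterator and the `joint_attention_kwargs` LoRA-scale plumbing were dead scaffolding. What survives is a plain load/unload swap. A minimal standalone sketch of that pattern, with a hypothetical `apply_lora` helper name and `pipe` assumed from the surrounding file:

```python
def apply_lora(pipe, lora_id):
    """Swap in a user-supplied LoRA adapter, dropping any previous one."""
    if lora_id and lora_id.strip() != "":
        # Unload first so adapters from earlier requests do not stack.
        pipe.unload_lora_weights()
        pipe.load_lora_weights(lora_id.strip())
```

With the scale plumbing gone, the adapter is applied at its native strength, which matches the UI default change from 0.95 to 1 below.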
@@ -90,7 +71,7 @@ css = """
 """
 
 with gr.Blocks(css=css) as app:
-    gr.HTML("<center><h1>FLUX.1-Dev with LoRA support</h1></center>")
+    gr.HTML("<center><h1>Qwen Image with LoRA support</h1></center>")
     with gr.Column(elem_id="col-container"):
         with gr.Row():
             with gr.Column():
@@ -105,7 +86,7 @@ with gr.Blocks(css=css) as app:
                     minimum=0,
                     maximum=2,
                     step=0.01,
-                    value=0.95,
+                    value=1,
                 )
             with gr.Row():
                 width = gr.Slider(label="Width", value=1024, minimum=64, maximum=2048, step=8)
@@ -114,7 +95,7 @@ with gr.Blocks(css=css) as app:
             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
             with gr.Row():
                 steps = gr.Slider(label="Inference steps", value=28, minimum=1, maximum=100, step=1)
-                cfg = gr.Slider(label="Guidance Scale", value=3.5, minimum=1, maximum=20, step=0.5)
+                cfg = gr.Slider(label="Guidance Scale", value=4, minimum=1, maximum=20, step=0.5)
                 # method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
 
             with gr.Row():
 
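Taken together, the commit retitles the Space for Qwen Image and nudges the generation defaults: LoRA scale 0.95 → 1 and Guidance Scale 3.5 → 4. A hedged sketch of a call using the new defaults, assuming standard diffusers kwargs; the Space's actual `pipe(...)` arguments are truncated in this diff, and Qwen Image's pipeline may expose guidance as `true_cfg_scale` rather than `guidance_scale`:

```python
generator = torch.Generator().manual_seed(42)

image = pipe(
    prompt="a watercolor fox in a snowy forest",  # placeholder prompt
    width=1024,                 # slider default
    height=1024,                # slider default
    num_inference_steps=28,     # steps slider default
    guidance_scale=4.0,         # new Guidance Scale default (was 3.5)
    generator=generator,
).images[0]
```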