Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -217,9 +217,9 @@ def adjust_generation_mode(speed_mode):
|
|
| 217 |
if speed_mode == "Speed (8 steps)":
|
| 218 |
return gr.update(value="Speed mode selected - 8 steps with Lightning LoRA"), 8, 1.0
|
| 219 |
else:
|
| 220 |
-
return gr.update(value="Quality mode selected -
|
| 221 |
|
| 222 |
-
@spaces.GPU(duration=
|
| 223 |
def create_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, negative_prompt=""):
|
| 224 |
pipe.to("cuda")
|
| 225 |
generator = torch.Generator(device="cuda").manual_seed(seed)
|
|
@@ -238,7 +238,7 @@ def create_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale,
|
|
| 238 |
|
| 239 |
return image
|
| 240 |
|
| 241 |
-
@spaces.GPU(duration=
|
| 242 |
def process_adapter_generation(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, aspect_ratio, lora_scale, speed_mode, progress=gr.Progress(track_tqdm=True)):
|
| 243 |
if selected_index is None:
|
| 244 |
raise gr.Error("You must select a LoRA before proceeding.")
|
|
|
|
| 217 |
if speed_mode == "Speed (8 steps)":
|
| 218 |
return gr.update(value="Speed mode selected - 8 steps with Lightning LoRA"), 8, 1.0
|
| 219 |
else:
|
| 220 |
+
return gr.update(value="Quality mode selected - 48 steps for best quality"), 48, 3.5
|
| 221 |
|
| 222 |
+
@spaces.GPU(duration=108)
|
| 223 |
def create_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, negative_prompt=""):
|
| 224 |
pipe.to("cuda")
|
| 225 |
generator = torch.Generator(device="cuda").manual_seed(seed)
|
|
|
|
| 238 |
|
| 239 |
return image
|
| 240 |
|
| 241 |
+
@spaces.GPU(duration=108)
|
| 242 |
def process_adapter_generation(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, aspect_ratio, lora_scale, speed_mode, progress=gr.Progress(track_tqdm=True)):
|
| 243 |
if selected_index is None:
|
| 244 |
raise gr.Error("You must select a LoRA before proceeding.")
|