Spaces: Running on Zero
Upload 2 files
Files changed:
- app.py (+5 -7)
- requirements.txt (+1 -1)
app.py CHANGED
@@ -8,7 +8,6 @@ from diffusers import FluxControlNetPipeline, FluxControlNetModel, FluxMultiCont
 from huggingface_hub import HfFileSystem, ModelCard
 import random
 import time
-import os

 from env import models, num_loras, num_cns
 from mod import (clear_cache, get_repo_safetensors, is_repo_name, is_repo_exists, get_model_trigger,
@@ -130,7 +129,6 @@ def update_selection(evt: gr.SelectData, width, height):

 @spaces.GPU(duration=70)
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress=gr.Progress(track_tqdm=True)):
-    from diffusers.utils import load_image
     global pipe
     global taef1
     global good_vae
@@ -139,7 +137,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
     try:
         good_vae.to("cuda")
         taef1.to("cuda")
-        generator = torch.Generator(device="cuda").manual_seed(seed)
+        generator = torch.Generator(device="cuda").manual_seed(int(float(seed)))

         with calculateDuration("Generating image"):
             # Generate image
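Note on the hunk above: Gradio number widgets can deliver the seed as a float (or a numeric string once it has passed through UI state), while `torch.Generator.manual_seed` only accepts an integer, so the new `int(float(seed))` cast makes the call robust to either form. A minimal sketch of that pattern, independent of this Space's pipeline code (the helper name is illustrative, not from app.py):

```python
import torch

def make_generator(seed, device: str = "cpu") -> torch.Generator:
    # Gradio may hand back 42, 42.0, or "42"; manual_seed() needs an int,
    # so cast through float first, exactly like the updated line above.
    return torch.Generator(device=device).manual_seed(int(float(seed)))

gen = make_generator("42.0")   # device="cpu" so this quick check runs anywhere
print(gen.initial_seed())      # -> 42
```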
@@ -163,10 +161,10 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
             yield img
     else:
         pipe.to("cuda")
-        if controlnet is not None: controlnet.to("cuda")
-        if controlnet_union is not None: controlnet_union.to("cuda")
         pipe.vae = good_vae
-
+        if controlnet_union is not None: controlnet_union.to("cuda")
+        if controlnet is not None: controlnet.to("cuda")
+        pipe.enable_model_cpu_offload()
         progress(0, desc="Start Inference with ControlNet.")
         for img in pipe(
             prompt=prompt_mash,
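Note on the hunk above: the ControlNet branch now assigns the full-quality VAE before moving the ControlNet modules to the GPU, and it adds `pipe.enable_model_cpu_offload()`, which keeps the pipeline's components on the CPU and moves each one to the GPU only while it is needed. That lowers peak VRAM at the cost of speed, which is what the "(extremely slow)" label further down refers to. A minimal sketch of the offload call on a generic diffusers pipeline; the model id and prompt are placeholders, not taken from this Space:

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1",   # placeholder model id for illustration
    torch_dtype=torch.float16,
)
# Instead of pipe.to("cuda"): each model component is copied to the GPU only
# while it runs, then offloaded back to the CPU. Requires `accelerate`.
pipe.enable_model_cpu_offload()

image = pipe(
    "a watercolor fox in a forest",        # placeholder prompt
    num_inference_steps=30,
    generator=torch.Generator(device="cuda").manual_seed(42),
).images[0]
image.save("fox.png")
```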
@@ -443,7 +441,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css, delete_cache
         lora_download = [None] * num_loras
         for i in range(num_loras):
             lora_download[i] = gr.Button(f"Get and set LoRA to {int(i+1)}")
-        with gr.Accordion("ControlNet (
+        with gr.Accordion("ControlNet (extremely slow)", open=True, visible=True):
             with gr.Column():
                 cn_on = gr.Checkbox(False, label="Use ControlNet")
                 cn_mode = [None] * num_cns
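The updated Accordion labels the ControlNet section "(extremely slow)" and renders it expanded (`open=True`). A standalone sketch of the same Gradio pattern, outside this Space's full layout:

```python
import gradio as gr

with gr.Blocks() as demo:
    # open=True renders the section expanded on page load.
    with gr.Accordion("ControlNet (extremely slow)", open=True, visible=True):
        cn_on = gr.Checkbox(False, label="Use ControlNet")

demo.launch()
```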
requirements.txt CHANGED

@@ -16,4 +16,4 @@ deepspeed
 mediapipe
 openai==1.37.0
 translatepy
-
+accelerate
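The new `accelerate` entry matches the `pipe.enable_model_cpu_offload()` call added in app.py: diffusers implements the offload hooks through Accelerate and errors out at runtime if the package is missing. An optional guard, not part of this Space, that fails early with a clearer message:

```python
import importlib.util

# Verify accelerate is installed before enabling CPU offload; otherwise the
# failure only surfaces later, in the middle of a generation request.
if importlib.util.find_spec("accelerate") is None:
    raise RuntimeError(
        "pipe.enable_model_cpu_offload() needs the 'accelerate' package; "
        "add `accelerate` to requirements.txt or run `pip install accelerate`."
    )
```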