Commit: Switch to sharing a single pipe
ominicontrol.py CHANGED (+3 -4)

@@ -12,9 +12,6 @@ pipe = FluxPipeline.from_pretrained(
 )
 pipe = pipe.to("cuda")
 
-prompt_pipe = FluxPipeline.from_pipe(pipe)
-prompt_pipe = prompt_pipe.to("cuda")
-
 pipe.unload_lora_weights()
 
 pipe.load_lora_weights(
@@ -48,6 +45,7 @@ def generate_image(
     use_random_seed,
     seed,
 ):
+    pipe.enable_lora()
     # Prepare Condition
     def resize(img, factor=16):
         w, h = img.size
@@ -144,6 +142,7 @@ def generate_image_with_prompt(
     use_random_seed,
     seed,
 ):
+    pipe.disable_lora()
     # Prepare Condition
     def resize(img, factor=16):
         w, h = img.size
@@ -204,7 +203,7 @@ def generate_image_with_prompt(
     )
     # Generate
     result_img = generate(
-        prompt_pipe,
+        pipe,
         prompt=prompt,
         conditions=[condition],
         num_inference_steps=steps,
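
The commit drops the second FluxPipeline (previously built with FluxPipeline.from_pipe) and has both generation paths reuse one shared pipe: the LoRA weights stay loaded on that single pipeline, generate_image() switches the adapter on, and generate_image_with_prompt() switches it off before calling generate(). The sketch below shows the same single-pipe pattern in isolation; it is not the Space's actual ominicontrol.py, and the base model id, LoRA repo id, step count, and function names are placeholder assumptions.

import torch
from diffusers import FluxPipeline

# One pipeline object serves both the LoRA-conditioned and the plain-prompt path.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",  # assumed base checkpoint (placeholder)
    torch_dtype=torch.bfloat16,
).to("cuda")

# Load the LoRA once; enable_lora()/disable_lora() only toggle the adapter layers.
pipe.load_lora_weights("your-username/your-flux-lora")  # placeholder LoRA repo id

def generate_with_lora(prompt: str):
    pipe.enable_lora()       # condition-guided path: adapter active
    return pipe(prompt=prompt, num_inference_steps=8).images[0]

def generate_plain(prompt: str):
    pipe.disable_lora()      # prompt-only path: same pipe, adapter switched off
    return pipe(prompt=prompt, num_inference_steps=8).images[0]

Because both entry points share one pipeline, the LoRA is loaded a single time and each call only flips the adapter state, which is what the +3/-4 diff above does inside generate_image() and generate_image_with_prompt().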