Spaces: Running on Zero
nroggendorff committed
add more time to zero for longer prompts
other changes are commented inline; it's all untested, so there might be an obvious bug I missed. I literally made the changes in the browser.
app.py CHANGED
```diff
@@ -14,10 +14,10 @@ with open('loras.json', 'r') as f:
 loras = json.load(f)
 
 # Initialize the base model
-base_model = "black-forest-labs/FLUX.1-
+base_model = "black-forest-labs/FLUX.1-schnell"
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
 
-MAX_SEED = 2**32-1
+MAX_SEED = 2**32-1 # 4294967295
 
 class calculateDuration:
     def __init__(self, activity_name=""):
@@ -56,25 +56,25 @@ def update_selection(evt: gr.SelectData, width, height):
         height,
     )
 
-@spaces.GPU(duration=
-def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
+@spaces.GPU(duration=120) # add more time for some of the loras, e.g. the anime ones
+def generate_image(prompt, trigger_word, negative_prompt, steps, seed, cfg_scale, width, height, lora_scale, progress):
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
 
     with calculateDuration("Generating image"):
         # Generate image
         image = pipe(
-            prompt=f"{prompt} {trigger_word}",
+            prompt=f"{prompt} {trigger_word}\nDO NOT INCLUDE {negative_prompt} FOR ANY REASON HOLY FRICK I'LL KILL YOUR STUPID ARTIFICIAL BUTT IF YOU DO THIS!!", # attempt at adding negative prompt, untested
             num_inference_steps=steps,
             guidance_scale=cfg_scale,
             width=width,
             height=height,
             generator=generator,
-            joint_attention_kwargs={"scale": lora_scale},
+            joint_attention_kwargs={"scale": min(1, max(1e-2, lora_scale))}, # add maximum and minimum
         ).images[0]
     return image
 
-def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, negative_prompt, progress=gr.Progress(track_tqdm=True)):
     if selected_index is None:
         raise gr.Error("You must select a LoRA before proceeding.")
 
@@ -94,7 +94,7 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress)
+    image = generate_image(prompt, trigger_word, negative_prompt, steps, seed, cfg_scale, width, height, lora_scale, progress)
     pipe.to("cpu")
     pipe.unload_lora_weights()
     return image, seed
@@ -148,6 +148,8 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
                 randomize_seed = gr.Checkbox(True, label="Randomize seed")
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
                 lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=1, step=0.01, value=0.95)
+            with gr.Row():
+                negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Type what you want to exclude from the image.") # add the negative prompt in the dropdown
 
     gallery.select(
         update_selection,
@@ -158,7 +160,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     gr.on(
         triggers=[generate_button.click, prompt.submit],
         fn=run_lora,
-        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
+        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, negative_prompt],
         outputs=[result, seed]
     )
 
```
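One of the commented changes can be sanity-checked without running the Space. The clamp added to `joint_attention_kwargs` pins the LoRA scale to [0.01, 1.0]; pulled out into a standalone helper (the name `clamp_lora_scale` is mine, not part of the app), its behavior is:

```python
# Standalone check of the clamp expression from the diff.
def clamp_lora_scale(lora_scale: float) -> float:
    return min(1, max(1e-2, lora_scale))

assert clamp_lora_scale(-0.5) == 1e-2   # below range: floored to 0.01
assert clamp_lora_scale(0.95) == 0.95   # in range: passed through
assert clamp_lora_scale(3.0) == 1       # above range: capped at 1.0
```

Since the LoRA Scale slider is already bounded to [0, 1], the floor mainly guards against a scale of exactly 0. The negative-prompt change is shakier: as the commit message concedes, it is untested, and appending "DO NOT INCLUDE {negative_prompt}" to the prompt string is a best-effort hack rather than a true negative prompt; mentioning the unwanted terms in the prompt may even steer the model toward them.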
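On the commit's main point, a flat `@spaces.GPU(duration=120)` reserves the full window even for short jobs. The spaces package also documents a dynamic form where `duration` is a callable receiving the same arguments as the decorated function, so the allotment could scale with the requested steps instead. A hypothetical sketch, untested, with a made-up per-step estimate:

```python
import spaces

# Hypothetical alternative (not in this commit): scale the ZeroGPU window
# with the requested step count instead of a flat 120 s. The callable must
# accept the same arguments as the function it guards.
def get_duration(prompt, trigger_word, negative_prompt, steps, seed,
                 cfg_scale, width, height, lora_scale, progress):
    return min(120, 30 + int(steps * 1.5))  # made-up estimate: base + ~1.5 s/step

@spaces.GPU(duration=get_duration)
def generate_image(prompt, trigger_word, negative_prompt, steps, seed,
                   cfg_scale, width, height, lora_scale, progress):
    ...
```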
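Lastly, a coupling to watch when threading `negative_prompt` through like this: Gradio passes `inputs` to the handler positionally, so the new component has to occupy the same slot in the `inputs` list as in `run_lora`'s signature (last, in both places here), while the trailing `progress=gr.Progress(track_tqdm=True)` default is injected by Gradio itself and must stay out of the list. The diff keeps the two in sync, which is the easy thing to get wrong when editing in the browser.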