Spaces: Running on Zero

AlekseyCalvin committed: Update app.py

app.py CHANGED
@@ -104,7 +104,7 @@ def update_selection(evt: gr.SelectData, width, height):
     )
 
 @spaces.GPU(duration=70)
-def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
+def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, negative_prompt, lora_scale, progress):
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
 
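(For context on the decorator above: a minimal standalone sketch of the ZeroGPU pattern, not taken from app.py. `spaces.GPU(duration=70)` appears in the diff itself; the helper name `run_on_gpu` and the keyword passthrough are illustrative only.)

import spaces
import torch

@spaces.GPU(duration=70)  # request roughly 70 s of ZeroGPU time per call
def run_on_gpu(pipe, seed, **pipe_kwargs):
    # CUDA is touched only inside the decorated function; the pipeline is
    # handed back to the CPU afterwards, mirroring what run_lora does below.
    pipe.to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(seed)
    try:
        return pipe(generator=generator, **pipe_kwargs).images[0]
    finally:
        pipe.to("cpu")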
@@ -117,11 +117,12 @@ def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height,
         width=width,
         height=height,
         generator=generator,
+        negative_prompt=negative_prompt,
         joint_attention_kwargs={"scale": lora_scale},
     ).images[0]
     return image
 
-def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, negative_prompt, lora_scale, progress=gr.Progress(track_tqdm=True)):
     if selected_index is None:
         raise gr.Error("You must select a LoRA before proceeding.")
 
@@ -151,7 +152,7 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress)
+    image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, negative_prompt, lora_scale, progress)
     pipe.to("cpu")
     pipe.unload_lora_weights()
     return image, seed
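(A hedged usage sketch of the patched signature, inside app.py's namespace; the prompt, trigger word, and numeric values below are made up, and it assumes a LoRA is already loaded onto `pipe` and that the underlying pipeline accepts `negative_prompt`.)

image = generate_image(
    prompt="a printing workshop at dawn",        # illustrative prompt
    trigger_word="HST style",                    # illustrative trigger word
    steps=28,
    seed=42,
    cfg_scale=3.5,
    width=1024,
    height=1024,
    negative_prompt="blurry, oversaturated",     # argument added by this commit
    lora_scale=0.9,
    progress=gr.Progress(track_tqdm=True),       # same default run_lora uses
)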
@@ -167,22 +168,24 @@ css = '''
 '''
 with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     title = gr.HTML(
-        """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> SOONfactory </h1>""",
+        """<h1><img src="https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer/resolve/main/flux_lora.png" alt="LoRA"> LibreFLUX SOONfactory </h1>""",
         elem_id="title",
     )
     # Info blob stating what the app is running
     info_blob = gr.HTML(
-        """<div id="info_blob">
+        """<div id="info_blob"> SOON®'s curated LoRa Gallery & Art Manufactory Space.|Runs on LibreFLUX model + Zer0int's fine-tuned CLIP (*'normal' 77 tokens)| Largely stocked w/our trained LoRAs: Historic Color, Silver Age Poets, Sots Art, more!|</div>"""
     )
 
     # Info blob stating what the app is running
     info_blob = gr.HTML(
-        """<div id="info_blob">
+        """<div id="info_blob"> Pre-phrase Prompts w/: 1-2. HST style |3. RCA poster |4.SOTS art |5.HST Austin Osman Spare |6. Mayakovsky |7-8. Tsvetaeva |9. Akhmatova |10. Mandelshtam |11-13. Blok |14. LEN Lenin |15. Trotsky |16. Rosa Fluxenburg |17-30. HST |31. how2draw |32. propaganda poster |33. TOK photo cartoon hybrid |34. photo |35.unexpected photo |36. flmft |37. Yearbook |38. TOK portra |39. pficonics |40. retrofuturism |41. wh3r3sw4ld0 |42. amateur photo |43. crisp photo |44-45. ADU |46. Film Photo |47. ff-collage |48. HST|49-50. AOS Austin Osman Spare art |51. cover </div>"""
     )
     selected_index = gr.State(None)
     with gr.Row():
-        with gr.Column(scale=
+        with gr.Column(scale=2):
             prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Select LoRa/Style & type prompt!")
+        with gr.Column(scale=2):
+            negative_prompt = gr.Textbox(label="Negative Prompt", lines=1, placeholder="What to exclude!")
         with gr.Column(scale=1, elem_id="gen_column"):
             generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
     with gr.Row():
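(The new prompt row can be tried in isolation with this minimal Gradio sketch; the component calls match the hunk above, while the `demo` variable and the launch call exist only to run the layout on its own.)

import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=2):
            prompt = gr.Textbox(label="Prompt", lines=1,
                                placeholder="Select LoRa/Style & type prompt!")
        with gr.Column(scale=2):
            negative_prompt = gr.Textbox(label="Negative Prompt", lines=1,
                                         placeholder="What to exclude!")
        with gr.Column(scale=1):
            generate_button = gr.Button("Generate", variant="primary")

demo.launch()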
@@ -208,7 +211,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
 
     with gr.Row():
         width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
-        height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=
+        height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
 
     with gr.Row():
         randomize_seed = gr.Checkbox(True, label="Randomize seed")
@@ -224,7 +227,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     gr.on(
         triggers=[generate_button.click, prompt.submit],
         fn=run_lora,
-        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
+        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, negative_prompt, lora_scale],
         outputs=[result, seed]
     )
 
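(Worth keeping in mind for the hunk above: `gr.on` passes `inputs` to `fn` positionally, so `negative_prompt` must sit in the same slot in the list as in `run_lora`'s signature. A toy sketch of that mapping, separate from app.py:)

import gradio as gr

def echo(prompt_text, negative_text):
    # Components listed in `inputs` arrive here in the same order.
    return f"prompt={prompt_text!r}, negative={negative_text!r}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    negative_prompt = gr.Textbox(label="Negative Prompt")
    result = gr.Textbox(label="Result")
    run_button = gr.Button("Run")
    gr.on(triggers=[run_button.click, prompt.submit], fn=echo,
          inputs=[prompt, negative_prompt], outputs=[result])

demo.launch()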