Spaces · Running on Zero

Update app.py
app.py CHANGED
@@ -1,171 +1,144 @@
-import spaces
+import os
+import time
+import random
+import tempfile
 import torch
 import gradio as gr
-from gradio import processing_utils, utils
 from PIL import Image
+
+import spaces
+from gradio import processing_utils, utils
 
 from diffusers import (
-    DiffusionPipeline,
     AutoencoderKL,
-    StableDiffusionControlNetPipeline,
     ControlNetModel,
-    StableDiffusionImg2ImgPipeline,
+    StableDiffusionControlNetPipeline,
     StableDiffusionControlNetImg2ImgPipeline,
+    StableDiffusionLatentUpscalePipeline,
     DPMSolverMultistepScheduler,
-    EulerDiscreteScheduler
+    EulerDiscreteScheduler,
 )
-
-import time
+
 from share_btn import community_icon_html, loading_icon_html, share_js
 import user_history
 from illusion_style import css
-import os
-from transformers import CLIPImageProcessor
-from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 
+# -----------------------------
+# Device & dtype (GPU/CPU auto)
+# -----------------------------
+device = "cuda" if torch.cuda.is_available() else "cpu"
+dtype = torch.float16 if device == "cuda" else torch.float32
 
-#
+# -----------------------------
+# Base / ControlNet models
+# -----------------------------
+BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
+VAE_ID = "stabilityai/sd-vae-ft-mse"
+CONTROLNET_ID = "monster-labs/control_v1p_sd15_qrcode_monster"
 
-#
-safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker").to("cuda")
-feature_extractor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
+# -----------------------------
+# Load components
+# -----------------------------
+vae = AutoencoderKL.from_pretrained(VAE_ID, torch_dtype=dtype)
+controlnet = ControlNetModel.from_pretrained(CONTROLNET_ID, torch_dtype=dtype)
 
+# ⚠️ safety checker & clip feature extractor removed
 main_pipe = StableDiffusionControlNetPipeline.from_pretrained(
     BASE_MODEL,
     controlnet=controlnet,
     vae=vae,
-    safety_checker=safety_checker,
-    feature_extractor=feature_extractor,
-    torch_dtype=torch.float16,
-)
+    safety_checker=None,  # <= important
+    feature_extractor=None,  # <= important
+    torch_dtype=dtype,
+)
+main_pipe = main_pipe.to(device)
 
-#
-#def check_nsfw_images(images: list[Image.Image]) -> tuple[list[Image.Image], list[bool]]:
-#    if SAFETY_CHECKER_ENABLED:
-#        safety_checker_input = feature_extractor(images, return_tensors="pt").to("cuda")
-#        has_nsfw_concepts = safety_checker(
-#            images=[images],
-#            clip_input=safety_checker_input.pixel_values.to("cuda")
-#        )
-#        return images, has_nsfw_concepts
-#    else:
-#        return images, [False] * len(images)
-
-#main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
-#main_pipe.unet.to(memory_format=torch.channels_last)
-#main_pipe.unet = torch.compile(main_pipe.unet, mode="reduce-overhead", fullgraph=True)
-#model_id = "stabilityai/sd-x2-latent-upscaler"
+# Img2Img pipe reusing components
 image_pipe = StableDiffusionControlNetImg2ImgPipeline(**main_pipe.components)
+image_pipe = image_pipe.to(device)
 
-#image_pipe.unet = torch.compile(image_pipe.unet, mode="reduce-overhead", fullgraph=True)
-#upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-#upscaler.to("cuda")
-
+# -----------------------------
 # Sampler map
+# -----------------------------
 SAMPLER_MAP = {
-    "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True, algorithm_type="sde-dpmsolver++"),
+    "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(
+        config, use_karras=True, algorithm_type="sde-dpmsolver++"
+    ),
     "Euler": lambda config: EulerDiscreteScheduler.from_config(config),
 }
 
-def center_crop_resize(img, output_size=(512, 512)):
+# -----------------------------
+# Helpers
+# -----------------------------
+def center_crop_resize(img: Image.Image, output_size=(512, 512)):
     width, height = img.size
-    new_dimension = min(width, height)
-    left = (width - new_dimension)/2
-    top = (height - new_dimension)/2
-    right = (width + new_dimension)/2
-    bottom = (height + new_dimension)/2
-
-    # Crop and resize
+    new_dim = min(width, height)
+    left = (width - new_dim) / 2
+    top = (height - new_dim) / 2
+    right = (width + new_dim) / 2
+    bottom = (height + new_dim) / 2
     img = img.crop((left, top, right, bottom))
     img = img.resize(output_size)
-
     return img
 
 def common_upscale(samples, width, height, upscale_method, crop=False):
-    return torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)
+    if crop == "center":
+        old_w = samples.shape[3]
+        old_h = samples.shape[2]
+        old_aspect = old_w / old_h
+        new_aspect = width / height
+        x = 0
+        y = 0
+        if old_aspect > new_aspect:
+            x = round((old_w - old_w * (new_aspect / old_aspect)) / 2)
+        elif old_aspect < new_aspect:
+            y = round((old_h - old_h * (old_aspect / new_aspect)) / 2)
+        s = samples[:, :, y : old_h - y, x : old_w - x]
+    else:
+        s = samples
+    return torch.nn.functional.interpolate(s, size=(height, width), mode=upscale_method)
 
 def upscale(samples, upscale_method, scale_by):
-    return (s)
+    width = round(samples["images"].shape[3] * scale_by)
+    height = round(samples["images"].shape[2] * scale_by)
+    s = common_upscale(samples["images"], width, height, upscale_method, "disabled")
+    return s
 
 def check_inputs(prompt: str, control_image: Image.Image):
     if control_image is None:
         raise gr.Error("Please select or upload an Input Illusion")
-    if prompt is None or prompt == "":
+    if not prompt:
        raise gr.Error("Prompt is required")
 
-def convert_to_base64(pil_image):
-    with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as temp_file:
-        image.save(temp_file.name)
-        return temp_file.name
-
-# Inference function
+# -----------------------------
+# Inference
+# -----------------------------
 @spaces.GPU
 def inference(
     control_image: Image.Image,
     prompt: str,
     negative_prompt: str,
     guidance_scale: float = 8.0,
-    controlnet_conditioning_scale: float = 1,
-    control_guidance_start: float = 1,
-    control_guidance_end: float = 1,
+    controlnet_conditioning_scale: float = 1.0,
+    control_guidance_start: float = 1.0,
+    control_guidance_end: float = 1.0,
     upscaler_strength: float = 0.5,
     seed: int = -1,
-    sampler = "DPM++ Karras SDE",
+    sampler: str = "DPM++ Karras SDE",
     progress = gr.Progress(track_tqdm=True),
     profile: gr.OAuthProfile | None = None,
 ):
     start_time = time.time()
-    start_time_struct = time.localtime(start_time)
-    start_time_formatted = time.strftime("%H:%M:%S", start_time_struct)
-    print(f"Inference started at {start_time_formatted}")
-
-    # Generate the initial image
-    #init_image = init_pipe(prompt).images[0]
 
-    control_image_small = center_crop_resize(control_image)
+    control_image_small = center_crop_resize(control_image, (512, 512))
     control_image_large = center_crop_resize(control_image, (1024, 1024))
 
     main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
+
+    my_seed = random.randint(0, 2**32 - 1) if seed == -1 else int(seed)
+    generator = torch.Generator(device=device).manual_seed(my_seed)
+
+    # First pass -> latents
     out = main_pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
@@ -176,28 +149,28 @@ def inference(
         control_guidance_start=float(control_guidance_start),
         control_guidance_end=float(control_guidance_end),
         num_inference_steps=15,
-        output_type="latent"
+        output_type="latent",
     )
+
+    # Upscale latents
     upscaled_latents = upscale(out, "nearest-exact", 2)
+
+    # Second pass -> image
     out_image = image_pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
-        control_image=control_image_large,
+        control_image=control_image_large,
         image=upscaled_latents,
         guidance_scale=float(guidance_scale),
         generator=generator,
         num_inference_steps=20,
-        strength=upscaler_strength,
+        strength=float(upscaler_strength),
         control_guidance_start=float(control_guidance_start),
         control_guidance_end=float(control_guidance_end),
-        controlnet_conditioning_scale=float(controlnet_conditioning_scale)
+        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
     )
-    end_time = time.time()
-    end_time_struct = time.localtime(end_time)
-    end_time_formatted = time.strftime("%H:%M:%S", end_time_struct)
-    print(f"Inference ended at {end_time_formatted}, taking {end_time-start_time}s")
 
-    # Save
+    # Save history
     user_history.save_image(
         label=prompt,
         image=out_image["images"][0],
@@ -210,44 +183,49 @@ def inference(
             "control_guidance_start": control_guidance_start,
             "control_guidance_end": control_guidance_end,
             "upscaler_strength": upscaler_strength,
-            "seed":
+            "seed": my_seed,
             "sampler": sampler,
         },
     )
 
     return out_image["images"][0], gr.update(visible=True), gr.update(visible=True), my_seed
-
+
+# -----------------------------
+# UI
+# -----------------------------
 with gr.Blocks() as app:
     gr.Markdown(
         '''
         <div style="text-align: center;">
         <h1>Illusion Diffusion HQ 🌀</h1>
-        <p style="font-size:16px;">Generate
-        <p>
-        <p>
-        <p>This project works by using <a href="https://huggingface.co/monster-labs/control_v1p_sd15_qrcode_monster">Monster Labs QR Control Net</a>. Given a prompt and your pattern, we use a QR code conditioned controlnet to create a stunning illusion! Credit to: <a href="https://twitter.com/MrUgleh">MrUgleh</a> for discovering the workflow :)</p>
+        <p style="font-size:16px;">Generate high-quality illusion artwork with Stable Diffusion + ControlNet</p>
+        <p>A space by AP with contributions from the community.</p>
+        <p>This uses <a href="https://huggingface.co/monster-labs/control_v1p_sd15_qrcode_monster">Monster Labs QR ControlNet</a>.</p>
         </div>
         '''
     )
 
-
     state_img_input = gr.State()
     state_img_output = gr.State()
+
     with gr.Row():
         with gr.Column():
             control_image = gr.Image(label="Input Illusion", type="pil", elem_id="control_image")
             controlnet_conditioning_scale = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.8, label="Illusion strength", elem_id="illusion_strength", info="ControlNet conditioning scale")
-            gr.Examples(
+            gr.Examples(
+                examples=["checkers.png", "checkers_mid.jpg", "pattern.png", "ultra_checkers.png", "spiral.jpeg", "funky.jpeg"],
+                inputs=control_image
+            )
+            prompt = gr.Textbox(label="Prompt", elem_id="prompt", info="Type what you want to generate", placeholder="Medieval village scene with busy streets and a castle in the distance")
+            negative_prompt = gr.Textbox(label="Negative Prompt", info="What you do NOT want", value="low quality, blurry", elem_id="negative_prompt")
             with gr.Accordion(label="Advanced Options", open=False):
                 guidance_scale = gr.Slider(minimum=0.0, maximum=50.0, step=0.25, value=7.5, label="Guidance Scale")
-                sampler = gr.Dropdown(choices=list(SAMPLER_MAP.keys()), value="Euler")
-                control_start = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0, label="Start of ControlNet")
-                control_end = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="End of ControlNet")
-                strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="Strength of the upscaler")
-                seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=-1, label="Seed", info="-1 means random seed")
-                used_seed = gr.Number(label="Last seed used",interactive=False)
+                sampler = gr.Dropdown(choices=list(SAMPLER_MAP.keys()), value="Euler", label="Sampler")
+                control_start = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.0, label="Start of ControlNet")
+                control_end = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="End of ControlNet")
+                strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="Strength of the upscaler")
+                seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=-1, label="Seed", info="-1 = random")
+                used_seed = gr.Number(label="Last seed used", interactive=False)
             run_btn = gr.Button("Run")
         with gr.Column():
             result_image = gr.Image(label="Illusion Diffusion Output", interactive=False, elem_id="output")
@@ -256,6 +234,7 @@ with gr.Blocks() as app:
             loading_icon = gr.HTML(loading_icon_html)
             share_button = gr.Button("Share to community", elem_id="share-btn")
 
+    # Wire up
     prompt.submit(
         check_inputs,
         inputs=[prompt, control_image],
@@ -263,8 +242,9 @@ with gr.Blocks() as app:
     ).success(
         inference,
         inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
-        outputs=[result_image, result_image, share_group, used_seed]
-
+        outputs=[result_image, result_image, share_group, used_seed]
+    )
+
     run_btn.click(
         check_inputs,
         inputs=[prompt, control_image],
@@ -272,8 +252,9 @@ with gr.Blocks() as app:
     ).success(
         inference,
         inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
-        outputs=[result_image, result_image, share_group, used_seed]
-
+        outputs=[result_image, result_image, share_group, used_seed]
+    )
+
     share_button.click(None, [], [], js=share_js)
 
 with gr.Blocks(css=css) as app_with_history:
@@ -282,7 +263,7 @@ with gr.Blocks(css=css) as app_with_history:
     with gr.Tab("Past generations"):
         user_history.render()
 
-app_with_history.queue(max_size=20,api_open=False)
+app_with_history.queue(max_size=20, api_open=False)
 
 if __name__ == "__main__":
     app_with_history.launch(max_threads=400)
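A few review notes with runnable sketches follow; none of this is part of the commit itself.

First, the two-pass flow: main_pipe(..., output_type="latent") returns latents at 1/8 of the pixel resolution, upscale(out, "nearest-exact", 2) doubles them, and image_pipe then refines at the larger size. A minimal sketch of that arithmetic, with a dummy dict standing in for the real pipeline output:

import torch
import torch.nn.functional as F

# Stand-in for main_pipe(..., output_type="latent") on a 512x512 generation:
# SD latents are 1/8 of the pixel resolution, i.e. [batch, 4, 64, 64].
out = {"images": torch.randn(1, 4, 64, 64)}

# Same computation as upscale(out, "nearest-exact", 2) in this file.
scale_by = 2
height = round(out["images"].shape[2] * scale_by)
width = round(out["images"].shape[3] * scale_by)
upscaled = F.interpolate(out["images"], size=(height, width), mode="nearest-exact")
print(upscaled.shape)  # torch.Size([1, 4, 128, 128]) -> ~1024x1024 after VAE decode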
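Second, the seed convention introduced in inference(): -1 draws a fresh random seed, anything else is used verbatim, and the resolved value is surfaced in the "Last seed used" field. The same logic in isolation (resolve_seed is a hypothetical helper name, for illustration only):

import random
import torch

def resolve_seed(seed: int = -1, device: str = "cpu"):
    # -1 -> pick a fresh random seed; otherwise use the given value as-is.
    my_seed = random.randint(0, 2**32 - 1) if seed == -1 else int(seed)
    generator = torch.Generator(device=device).manual_seed(my_seed)
    return my_seed, generator

# The same explicit seed reproduces the same sampling noise.
seed_a, gen_a = resolve_seed(1234)
seed_b, gen_b = resolve_seed(1234)
assert torch.equal(torch.rand(4, generator=gen_a), torch.rand(4, generator=gen_b))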
control_guidance_start=float(control_guidance_start),
|
169 |
control_guidance_end=float(control_guidance_end),
|
170 |
+
controlnet_conditioning_scale=float(controlnet_conditioning_scale),
|
171 |
)
|
|
|
|
|
|
|
|
|
172 |
|
173 |
+
# Save history
|
174 |
user_history.save_image(
|
175 |
label=prompt,
|
176 |
image=out_image["images"][0],
|
|
|
183 |
"control_guidance_start": control_guidance_start,
|
184 |
"control_guidance_end": control_guidance_end,
|
185 |
"upscaler_strength": upscaler_strength,
|
186 |
+
"seed": my_seed,
|
187 |
"sampler": sampler,
|
188 |
},
|
189 |
)
|
190 |
|
191 |
return out_image["images"][0], gr.update(visible=True), gr.update(visible=True), my_seed
|
192 |
+
|
193 |
+
# -----------------------------
|
194 |
+
# UI
|
195 |
+
# -----------------------------
|
196 |
with gr.Blocks() as app:
|
197 |
gr.Markdown(
|
198 |
'''
|
199 |
<div style="text-align: center;">
|
200 |
<h1>Illusion Diffusion HQ 🌀</h1>
|
201 |
+
<p style="font-size:16px;">Generate high-quality illusion artwork with Stable Diffusion + ControlNet</p>
|
202 |
+
<p>A space by AP with contributions from the community.</p>
|
203 |
+
<p>This uses <a href="https://huggingface.co/monster-labs/control_v1p_sd15_qrcode_monster">Monster Labs QR ControlNet</a>.</p>
|
|
|
204 |
</div>
|
205 |
'''
|
206 |
)
|
207 |
|
|
|
208 |
state_img_input = gr.State()
|
209 |
state_img_output = gr.State()
|
210 |
+
|
211 |
with gr.Row():
|
212 |
with gr.Column():
|
213 |
control_image = gr.Image(label="Input Illusion", type="pil", elem_id="control_image")
|
214 |
controlnet_conditioning_scale = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.8, label="Illusion strength", elem_id="illusion_strength", info="ControlNet conditioning scale")
|
215 |
+
gr.Examples(
|
216 |
+
examples=["checkers.png", "checkers_mid.jpg", "pattern.png", "ultra_checkers.png", "spiral.jpeg", "funky.jpeg"],
|
217 |
+
inputs=control_image
|
218 |
+
)
|
219 |
+
prompt = gr.Textbox(label="Prompt", elem_id="prompt", info="Type what you want to generate", placeholder="Medieval village scene with busy streets and a castle in the distance")
|
220 |
+
negative_prompt = gr.Textbox(label="Negative Prompt", info="What you do NOT want", value="low quality, blurry", elem_id="negative_prompt")
|
221 |
with gr.Accordion(label="Advanced Options", open=False):
|
222 |
guidance_scale = gr.Slider(minimum=0.0, maximum=50.0, step=0.25, value=7.5, label="Guidance Scale")
|
223 |
+
sampler = gr.Dropdown(choices=list(SAMPLER_MAP.keys()), value="Euler", label="Sampler")
|
224 |
+
control_start = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.0, label="Start of ControlNet")
|
225 |
+
control_end = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="End of ControlNet")
|
226 |
+
strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1.0, label="Strength of the upscaler")
|
227 |
+
seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=-1, label="Seed", info="-1 = random")
|
228 |
+
used_seed = gr.Number(label="Last seed used", interactive=False)
|
229 |
run_btn = gr.Button("Run")
|
230 |
with gr.Column():
|
231 |
result_image = gr.Image(label="Illusion Diffusion Output", interactive=False, elem_id="output")
|
|
|
234 |
loading_icon = gr.HTML(loading_icon_html)
|
235 |
share_button = gr.Button("Share to community", elem_id="share-btn")
|
236 |
|
237 |
+
# Wire up
|
238 |
prompt.submit(
|
239 |
check_inputs,
|
240 |
inputs=[prompt, control_image],
|
|
|
242 |
).success(
|
243 |
inference,
|
244 |
inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
|
245 |
+
outputs=[result_image, result_image, share_group, used_seed]
|
246 |
+
)
|
247 |
+
|
248 |
run_btn.click(
|
249 |
check_inputs,
|
250 |
inputs=[prompt, control_image],
|
|
|
252 |
).success(
|
253 |
inference,
|
254 |
inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
|
255 |
+
outputs=[result_image, result_image, share_group, used_seed]
|
256 |
+
)
|
257 |
+
|
258 |
share_button.click(None, [], [], js=share_js)
|
259 |
|
260 |
with gr.Blocks(css=css) as app_with_history:
|
|
|
263 |
with gr.Tab("Past generations"):
|
264 |
user_history.render()
|
265 |
|
266 |
+
app_with_history.queue(max_size=20, api_open=False)
|
267 |
|
268 |
if __name__ == "__main__":
|
269 |
app_with_history.launch(max_threads=400)
|