Update app.py
app.py CHANGED
@@ -16,9 +16,9 @@ from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-
-
-
+DESCRIPTION = "Animagine XL 3.1"
+if not torch.cuda.is_available():
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo is slower on CPU. </p>"
 IS_COLAB = utils.is_google_colab() or os.getenv("IS_COLAB") == "1"
 HF_TOKEN = os.getenv("HF_TOKEN")
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
@@ -42,7 +42,7 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 def load_pipeline(model_name):
     vae = AutoencoderKL.from_pretrained(
         "madebyollin/sdxl-vae-fp16-fix",
-        torch_dtype=torch.
+        torch_dtype=torch.float32,
     )
     pipeline = (
         StableDiffusionXLPipeline.from_single_file
@@ -53,7 +53,7 @@ def load_pipeline(model_name):
     pipe = pipeline(
         model_name,
         vae=vae,
-        torch_dtype=torch.
+        torch_dtype=torch.float32,
         custom_pipeline="lpw_stable_diffusion_xl",
         use_safetensors=True,
         add_watermarker=False,
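Both dtype changes pin the VAE and the pipeline to float32, which is what CPU execution needs (float16 kernels are poorly supported on CPU). A common alternative, sketched below only as an illustration and not part of this commit, is to derive the dtype from the available device so GPU runs keep the fp16 memory savings:

```python
import torch

# Hypothetical helper, not part of this commit: keep fp16 on CUDA for speed
# and memory, fall back to fp32 on CPU where fp16 support is limited.
def pick_dtype() -> torch.dtype:
    return torch.float16 if torch.cuda.is_available() else torch.float32

# load_pipeline() could then pass torch_dtype=pick_dtype() to both
# AutoencoderKL.from_pretrained() and the SDXL pipeline constructor.
```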
@@ -64,7 +64,6 @@ def load_pipeline(model_name):
     return pipe
 
 
-@spaces.GPU
 def generate(
     prompt: str,
     negative_prompt: str = "",
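`@spaces.GPU` comes from the Hugging Face `spaces` package and marks a function that should run on a ZeroGPU slice; dropping it lets `generate` run on whatever device the pipeline was loaded on, including plain CPU hardware. For reference, a minimal sketch of how the decorator is normally applied (assuming the `spaces` package is installed and the Space runs on ZeroGPU):

```python
import spaces  # Hugging Face ZeroGPU helper package

# Sketch only: the decorated function is given GPU access for the duration
# of each call when the Space runs on ZeroGPU hardware.
@spaces.GPU
def generate(prompt: str) -> str:
    return prompt
```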
@@ -139,39 +138,40 @@ def generate(
     logger.info(json.dumps(metadata, indent=4))
 
     try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        with torch.no_grad():
+            if use_upscaler:
+                latents = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    width=width,
+                    height=height,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    generator=generator,
+                    output_type="latent",
+                ).images
+                upscaled_latents = utils.upscale(latents, "nearest-exact", upscale_by)
+                images = upscaler_pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    image=upscaled_latents,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    strength=upscaler_strength,
+                    generator=generator,
+                    output_type="pil",
+                ).images
+            else:
+                images = pipe(
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
+                    width=width,
+                    height=height,
+                    guidance_scale=guidance_scale,
+                    num_inference_steps=num_inference_steps,
+                    generator=generator,
+                    output_type="pil",
+                ).images
 
         if images:
             image_paths = [
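The rewritten generation block wraps inference in `torch.no_grad()` and implements a two-pass "highres fix": the base `pipe` call returns latents (`output_type="latent"`), the latents are enlarged by `upscale_by`, and `upscaler_pipe` (an img2img pipeline) refines them at `upscaler_strength`. `utils.upscale` itself is not part of this diff; a minimal sketch of what a "nearest-exact" latent upscale could look like, assuming it simply interpolates the latent tensor:

```python
import torch
import torch.nn.functional as F

# Assumed behaviour of utils.upscale(latents, "nearest-exact", scale);
# a sketch, not the Space's actual implementation.
def upscale_latents(latents: torch.Tensor, scale: float) -> torch.Tensor:
    # latents: (batch, 4, h, w) SDXL latent tensor. Nearest-exact keeps the
    # blocky enlargement that the img2img pass then refines.
    return F.interpolate(latents, scale_factor=scale, mode="nearest-exact")
```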
@@ -193,11 +193,13 @@ def generate(
         utils.free_memory()
 
 
+
 if torch.cuda.is_available():
     pipe = load_pipeline(MODEL)
     logger.info("Loaded on Device!")
 else:
-    pipe =
+    pipe = load_pipeline(MODEL)
+    logger.info("Loaded on CPU!")
 
 styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in config.style_list}
 quality_prompt = {
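With this change both branches call `load_pipeline(MODEL)`, so the conditional only picks the log message. An equivalent, more compact form (a possible simplification, not what the commit does):

```python
# Sketch: collapse the branch above into a single load plus a conditional log.
pipe = load_pipeline(MODEL)
logger.info("Loaded on Device!" if torch.cuda.is_available() else "Loaded on CPU!")
```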
@@ -206,11 +208,11 @@ quality_prompt = {
 
 wildcard_files = utils.load_wildcard_files("wildcard")
 
-with gr.Blocks(css="style.css") as demo:
-
-
-
-
+with gr.Blocks(css="style.css", theme="NoCrypt/[email protected]") as demo:
+    title = gr.HTML(
+        f"""<h1><span>{DESCRIPTION}</span></h1>""",
+        elem_id="title",
+    )
     gr.Markdown(
         f"""Gradio demo for [cagliostrolab/animagine-xl-3.1](https://huggingface.co/cagliostrolab/animagine-xl-3.1)""",
         elem_id="subtitle",
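`theme="NoCrypt/[email protected]"` asks Gradio to fetch a shared theme from the Hugging Face Hub by repo id and version. A minimal sketch of the same mechanism, with a placeholder component (`gr.Textbox` is only for illustration):

```python
import gradio as gr

# Sketch: gr.Blocks accepts a Hub theme id of the form "user/repo@version".
with gr.Blocks(theme="NoCrypt/[email protected]") as demo:
    gr.Textbox(label="prompt")

if __name__ == "__main__":
    demo.launch()
```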
@@ -392,4 +394,4 @@ with gr.Blocks(css="style.css") as demo:
     )
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)
+    demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)