import os
import numpy as np
import gradio as gr
from utils.t2i import t2i_gen
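# NOTE (assumption): judging from the gr.on() wiring below, t2i_gen is expected
# to accept (prompt, seed, width, height, guidance_scale, num_inference_steps)
# and return an image object that gr.Image can render (e.g. a PIL.Image).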
MAX_SEED = np.iinfo(np.int32).max
MIN_IMAGE_SIZE = int(os.getenv("MIN_IMAGE_SIZE", "512"))
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
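# Slider bounds used below: the seed spans the non-negative int32 range, and the
# image width/height limits can be overridden via the MIN_IMAGE_SIZE /
# MAX_IMAGE_SIZE environment variables (defaults: 512 and 2048 pixels).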
with gr.Blocks(
    title="🪄 LayerDiffuse - Flux version",
    theme="CultriX/gradio-theme"
) as demo:
    gr.Markdown(
        """
        # 🪄 LayerDiffuse - Flux version
        A Flux implementation of [LayerDiffuse](https://github.com/lllyasviel/LayerDiffuse).
        **Feel free to open a PR and contribute to this demo to help improve it!**
        """
    )
    prompt = gr.Text(
        label="Prompt",
        info="Your prompt here",
        placeholder="E.g.: glass bottle, high quality"
    )
    with gr.Row():
        width = gr.Slider(
            label="Width",
            minimum=MIN_IMAGE_SIZE,
            maximum=MAX_IMAGE_SIZE,
            step=32,
            value=1024,
        )
        height = gr.Slider(
            label="Height",
            minimum=MIN_IMAGE_SIZE,
            maximum=MAX_IMAGE_SIZE,
            step=32,
            value=1024,
        )
    seed = gr.Slider(
        label="Seed",
        minimum=0,
        maximum=MAX_SEED,
        step=1,
        value=0,
    )
    with gr.Row():
        guidance_scale = gr.Slider(
            label="Guidance scale",
            minimum=1,
            maximum=20,
            step=0.1,
            value=3.5,
        )
        num_inference_steps = gr.Slider(
            label="Steps",
            minimum=10,
            maximum=100,
            step=1,
            value=50,
        )
    t2i_gen_bttn = gr.Button("Generate")
    t2i_result = gr.Image(
        label="Result",
        show_label=False,
        format="png"
    )

    # Disable the button while a generation is running, run t2i_gen,
    # then re-enable the button once the result is ready.
    gr.on(
        triggers=[
            t2i_gen_bttn.click
        ],
        fn=lambda: gr.update(interactive=False, value="Generating..."),
        outputs=t2i_gen_bttn,
        api_name=False
    ).then(
        fn=t2i_gen,
        inputs=[
            prompt,
            seed,
            width,
            height,
            guidance_scale,
            num_inference_steps
        ],
        outputs=t2i_result
    ).then(
        fn=lambda: gr.update(interactive=True, value="Generate"),
        outputs=t2i_gen_bttn,
        api_name=False
    )

if __name__ == "__main__":
    demo.queue(max_size=20).launch(show_error=True)