# SDXL-FWDLora / app.py
from diffusers import DiffusionPipeline
import gradio as gr
import torch
from PIL import Image, ImageDraw, ImageFont
## VAE - Special VAE used for training: madebyollin/sdxl-vae-fp16-fix.
from diffusers import AutoencoderKL
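# Base SDXL checkpoint and the LoRA fine-tuning weights that are loaded on top of it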
model = "stabilityai/stable-diffusion-xl-base-1.0"
finetuningLayer = "bbsgp/10xFWDLora"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch_dtype = torch.float16 if device.type == 'cuda' else torch.float32
import os
# HF_API_TOKEN = os.getenv("HF_API_TOKEN")
# from huggingface_hub import login
# login(token=HF_API_TOKEN)
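# The fp16-fix VAE keeps the SDXL VAE numerically stable when running in half precision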
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch_dtype)
pipe = DiffusionPipeline.from_pretrained(
    model,
    vae=vae,
    torch_dtype=torch_dtype,
    use_safetensors=True,
)
pipe.load_lora_weights(finetuningLayer)
pipe = pipe.to(device)
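# Helper that renders an error message onto a blank image so the Gradio output
# component still has something to display when the prompt is missing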
def create_error_image(message):
    # Create a blank 512x512 image with a white background
    width, height = 512, 512
    image = Image.new('RGB', (width, height), 'white')
    draw = ImageDraw.Draw(image)
    # Use Pillow's built-in default font
    font = ImageFont.load_default()
    # Draw the message roughly at the centre of the image
    draw.text((127, 251), message, font=font, fill="black")
    return image
def inference(model, finetuningLayer, prompt, guidance, steps, seed):
    # `model` and `finetuningLayer` come from the dropdowns; the pipeline is
    # already loaded globally, so they are informational only.
    if not prompt:
        return create_error_image("Sorry, add your text prompt and try again!!")

    # A seed of 0 means "random", matching the slider label below
    if seed == 0:
        seed = torch.randint(0, 2147483647, (1,)).item()
    generator = torch.Generator(device).manual_seed(int(seed))
    image = pipe(
        prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        generator=generator,
    ).images[0]
    return image
css = """
.finetuned-diffusion-div {
text-align: center;
max-width: 700px;
margin: 0 auto;
}
.finetuned-diffusion-div div {
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
}
.finetuned-diffusion-div div h1 {
font-weight: 900;
margin-bottom: 7px;
}
.finetuned-diffusion-div p {
margin-bottom: 10px;
font-size: 94%;
}
.finetuned-diffusion-div p a {
text-decoration: underline;
}
"""
with gr.Blocks(css=css) as demo:
    gr.HTML(
        """
        <div class="finetuned-diffusion-div">
          <div>
            <h1>Finetuned Diffusion</h1>
          </div>
        </div>
        """
    )
    with gr.Row():
        with gr.Column():
            model = gr.Dropdown(label="Base Model", choices=[model], value=model)
            finetuningLayer = gr.Dropdown(label="Finetuning Layer", choices=[finetuningLayer], value=finetuningLayer)
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="photo of 10xFWD style, 2D flat illustration ('10xFWD style' is the unique identifier that triggers the fine-tuned style)",
            )

            with gr.Accordion("Advanced options", open=True):
                guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
                steps = gr.Slider(label="Steps", value=50, minimum=2, maximum=100)
                seed = gr.Slider(0, 2147483647, label="Seed (0 = random)", value=0, step=1)

            run = gr.Button(value="Run")
            gr.Markdown(f"Running on: {device}")

        with gr.Column():
            image_out = gr.Image()
    ## Add prompt and press enter to run
    ## prompt.submit(inference, inputs=[model, finetuningLayer, prompt, guidance, steps, seed], outputs=image_out)
    ## Click run button to run
    run.click(inference, inputs=[model, finetuningLayer, prompt, guidance, steps, seed], outputs=image_out)

demo.queue(default_enabled=True).launch(share=True, debug=True)
# demo.queue()
# demo.launch(auth=("FWDDNA", "10XFWD"),share=True)