import gradio as gr
import torch
from PIL import Image
from src.pipeline_pe_clone import FluxPipeline
import spaces

# Run inference on a ZeroGPU slot (the Space runs on Zero; `spaces` is otherwise unused)
@spaces.GPU
def generate_image(model_path, image, height, width, prompt, guidance_scale, num_steps, lora_name):
    # gr.Number components return floats; the pipeline and PIL expect integers
    height, width, num_steps = int(height), int(width), int(num_steps)

    # Load the model
    pipeline = FluxPipeline.from_pretrained(
        model_path,
        torch_dtype=torch.bfloat16,
    ).to('cuda')

    # Load and fuse base LoRA weights
    pipeline.load_lora_weights("nicolaus-huang/PhotoDoodle", weight_name="pretrain.safetensors")
    pipeline.fuse_lora()
    pipeline.unload_lora_weights()

    # Load the selected LoRA effect unless the pretrained base model is requested
    if lora_name != 'pretrained':
        pipeline.load_lora_weights("nicolaus-huang/PhotoDoodle", weight_name=f"{lora_name}.safetensors")

    # Prepare the input image (PIL's Image.resize takes a (width, height) tuple)
    condition_image = image.resize((width, height)).convert("RGB")

    # Generate the output image
    result = pipeline(
        prompt=prompt,
        condition_image=condition_image,
        height=height,
        width=width,
        guidance_scale=guidance_scale,
        num_inference_steps=num_steps,
        max_sequence_length=512
    ).images[0]

    return result

# Create Gradio interface
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Model Path", value="black-forest-labs/FLUX.1-dev"),
        gr.Image(label="Input Image", type="pil"),
        gr.Number(label="Height", value=768),
        gr.Number(label="Width", value=512),
        gr.Textbox(label="Prompt", value="add a halo and wings for the cat by sksmagiceffects"),
        gr.Number(label="Guidance Scale", value=3.5),
        gr.Number(label="Number of Steps", value=20),
        gr.Dropdown(
            label="LoRA Name",
            choices=["pretrained", "sksmagiceffects", "sksmonstercalledlulu",
                     "skspaintingeffects", "sksedgeeffect", "skscatooneffect"],
            value="sksmagiceffects"
        )
    ],
    outputs=gr.Image(label="Output Image", type="pil"),
    title="FLUX Image Generation with LoRA"
)

if __name__ == "__main__":
    iface.launch()
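
A minimal usage sketch, not part of the original Space: it assumes the PhotoDoodle repository (providing src.pipeline_pe_clone) plus gradio, torch, diffusers, and peft are installed, a CUDA GPU is available, and the file is saved as app.py; the cat.png input and output.png path are placeholders.

# Launch the Gradio demo locally:
#   python app.py
#
# Or call generate_image directly, bypassing the UI:
from PIL import Image

result = generate_image(
    "black-forest-labs/FLUX.1-dev",
    Image.open("cat.png"),
    768, 512,
    "add a halo and wings for the cat by sksmagiceffects",
    3.5, 20,
    "sksmagiceffects",
)
result.save("output.png")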