import spaces
import gradio as gr
from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image, AutoPipelineForInpainting, AutoencoderKL
from diffusers.utils import load_image
import torch
from PIL import Image, ImageOps

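# fp16-safe SDXL VAE, shared by all three pipelines below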
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

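# SDXL text-to-image, image-to-image and inpainting pipelines, each with the SDXL
# IP-Adapter loaded and an initial image-prompt scale of 0.6 (overridden per request)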
text_pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("cuda")
text_pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
text_pipeline.set_ip_adapter_scale(0.6)

image_pipeline = AutoPipelineForImage2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("cuda")
image_pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
image_pipeline.set_ip_adapter_scale(0.6)

inpaint_pipeline = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("cuda")
inpaint_pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
inpaint_pipeline.set_ip_adapter_scale(0.6)

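# Text-to-image generation conditioned on the IP-Adapter reference image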
@spaces.GPU(enable_queue=True)
def text_to_image(ip, prompt, neg_prompt, width, height, ip_scale, strength, guidance, steps):
    # make sure the pipeline is on the GPU allocated for this call
    text_pipeline.to("cuda")

    # resize the IP-Adapter reference image in place so its longest side is at most 1024
    ip.thumbnail((1024, 1024))

    # apply the user-selected IP-Adapter influence for this request
    text_pipeline.set_ip_adapter_scale(ip_scale)
    
    # strength only applies to image-to-image and inpainting, so it is not forwarded here
    images = text_pipeline(
        prompt=prompt,
        ip_adapter_image=ip,
        negative_prompt=neg_prompt,
        width=width,
        height=height,
        guidance_scale=guidance,
        num_inference_steps=steps,
    ).images
    
    return images[0]

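# Image-to-image: re-noise the input image (controlled by strength) while conditioning on the IP-Adapter reference image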
@spaces.GPU(enable_queue=True)
def image_to_image(ip, image, prompt, neg_prompt, width, height, ip_scale, strength, guidance, steps):
    # make sure the pipeline is on the GPU allocated for this call
    image_pipeline.to("cuda")

    # resize both input images in place so their longest side is at most 1024
    ip.thumbnail((1024, 1024))
    image.thumbnail((1024, 1024))

    # apply the user-selected IP-Adapter influence for this request
    image_pipeline.set_ip_adapter_scale(ip_scale)
    
    # the output size follows the input image for image-to-image, so width/height are not forwarded
    images = image_pipeline(
        prompt=prompt,
        image=image,
        ip_adapter_image=ip,
        negative_prompt=neg_prompt,
        strength=strength,
        guidance_scale=guidance,
        num_inference_steps=steps,
    ).images
    
    return images[0]

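# Inpainting: repaint the masked region of the editor image while conditioning on the IP-Adapter reference image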
@spaces.GPU(enable_queue=True)
def inpaint(ip, image_editor, prompt, neg_prompt, width, height, ip_scale, strength, guidance, steps):
    # make sure the pipeline is on the GPU allocated for this call
    inpaint_pipeline.to("cuda")

    # the ImageEditor value is a dict holding the uploaded background and the painted layers
    image = image_editor['background'].convert('RGB')

    # build the inpainting mask: composite the painted layer onto a white canvas,
    # convert to grayscale and invert so the painted strokes become the white
    # region to repaint and the untouched area stays black (kept)
    mask = Image.new("RGBA", image_editor["layers"][0].size, "WHITE")
    mask.paste(image_editor["layers"][0], (0, 0), image_editor["layers"][0])
    mask = ImageOps.invert(mask.convert('L'))

    # resize all inputs in place so their longest side is at most 1024
    ip.thumbnail((1024, 1024))
    image.thumbnail((1024, 1024))
    mask.thumbnail((1024, 1024))

    # apply the user-selected IP-Adapter influence for this request
    inpaint_pipeline.set_ip_adapter_scale(ip_scale)
    
    images = inpaint_pipeline(
        prompt=prompt,
        image=image,
        mask_image=mask,
        ip_adapter_image=ip,
        negative_prompt=neg_prompt,
        width=width,
        height=height,
        strength=strength,
        guidance_scale=guidance,
        num_inference_steps=steps,
    ).images
    
    return images[0]


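# Gradio UI: three task tabs that share one result image and one set of advanced settings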
with gr.Blocks() as demo:
    gr.Markdown("""
    # IP-Adapter Playground

    by [Tony Assi](https://www.tonyassi.com/)
    """)
    with gr.Row():
        with gr.Tab("Text-to-Image"):
            text_ip = gr.Image(label='IP-Adapter Image', type='pil')
            text_prompt = gr.Textbox(label='Prompt')
            text_button = gr.Button("Generate")
        with gr.Tab("Image-to-Image"):
            image_ip = gr.Image(label='IP-Adapter Image', type='pil')
            image_image = gr.Image(label='Image', type='pil')
            image_prompt = gr.Textbox(label='Prompt')
            image_button = gr.Button("Generate")
        with gr.Tab("Inpainting"):
            inpaint_ip = gr.Image(label='IP-Adapter Image', type='pil')
            inpaint_editor = gr.ImageMask(label='Image', type='pil')
            inpaint_prompt = gr.Textbox(label='Prompt')
            inpaint_button = gr.Button("Generate")
            
        output_image = gr.Image(label='Result')
        
    with gr.Accordion("Advanced Settings", open=False):
        neg_prompt = gr.Textbox(label='Negative Prompt', value='ugly, deformed, nsfw')
        width_slider = gr.Slider(256, 1024, value=1024, step=8, label="Width")
        height_slider = gr.Slider(256, 1024, value=1024, step=8, label="Height")
        ip_scale_slider = gr.Slider(0.0, 1.0, value=0.8, label="IP-Adapter Scale")
        strength_slider = gr.Slider(0.0, 1.0, value=0.7, label="Strength")
        guidance_slider = gr.Slider(1.0, 15.0, value=7.5, label="Guidance")
        steps_slider = gr.Slider(50, 100, value=75, step=1, label="Steps")

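    # Prefilled examples for the Text-to-Image and Image-to-Image tabs (not cached, so they run on demand)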
    gr.Examples(
        [["./images/img1.jpg", "Paris Hilton", "ugly, deformed, nsfw",  1024, 1024, 0.8, 0.7, 7.5, 75]],
        [text_ip, text_prompt, neg_prompt, width_slider, height_slider, ip_scale_slider, strength_slider, guidance_slider, steps_slider],
        output_image,
        text_to_image,
        cache_examples=False,
        label='Text-to-Image Example'
    )

    gr.Examples(
        [["./images/img1.jpg", "./images/tony.jpg", "photo", "ugly, deformed, nsfw",  1024, 1024, 0.8, 0.7, 7.5, 75]],
        [image_ip, image_image, image_prompt, neg_prompt, width_slider, height_slider, ip_scale_slider, strength_slider, guidance_slider, steps_slider], 
        output_image,
        image_to_image,
        cache_examples=False,
        label='Image-to-Image Example'
    )
    

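    # Wire each tab's Generate button to its pipeline function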
    text_button.click(text_to_image, inputs=[text_ip, text_prompt, neg_prompt, width_slider, height_slider, ip_scale_slider, strength_slider, guidance_slider, steps_slider], outputs=output_image)
    image_button.click(image_to_image, inputs=[image_ip, image_image, image_prompt, neg_prompt, width_slider, height_slider, ip_scale_slider, strength_slider, guidance_slider, steps_slider], outputs=output_image)
    inpaint_button.click(inpaint, inputs=[inpaint_ip, inpaint_editor, inpaint_prompt, neg_prompt, width_slider, height_slider, ip_scale_slider, strength_slider, guidance_slider, steps_slider], outputs=output_image)

demo.launch()