import gradio as gr

import os
import sys
import argparse
import copy

from IPython.display import display
from PIL import Image, ImageDraw, ImageFont
from torchvision.ops import box_convert

import supervision as sv

from segment_anything import build_sam, SamPredictor
import cv2
import numpy as np
import matplotlib.pyplot as plt

import PIL
import requests
import torch
from io import BytesIO
from diffusers import StableDiffusionInpaintPipeline

from huggingface_hub import hf_hub_download
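# Dependency note (an assumption, not part of the original script): the demo
# expects gradio, torch/torchvision, Pillow, diffusers (with transformers),
# supervision, opencv-python, matplotlib, IPython, requests, huggingface_hub,
# and Meta's segment-anything package to be installed.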
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the Stable Diffusion 2 inpainting pipeline once at startup.
# fp16 weights are only usable on GPU, so fall back to fp32 on CPU.
sd_pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
).to(device)


def generate_image(image, mask, prompt, negative_prompt, pipe, seed):
    # The SD 2 inpainting model works on 512x512 inputs; keep the original
    # size so the result can be scaled back afterwards.
    w, h = image.size
    in_image = image.resize((512, 512))
    in_mask = mask.resize((512, 512))

    # Seeded generator for reproducible outputs.
    generator = torch.Generator(device).manual_seed(seed)

    result = pipe(image=in_image, mask_image=in_mask, prompt=prompt,
                  negative_prompt=negative_prompt, generator=generator)
    result = result.images[0]

    return result.resize((w, h))
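# Illustrative sketch (an assumption, not part of the original script): how
# generate_image could be driven from image files instead of the Gradio UI.
# The paths are hypothetical placeholders; the mask should be white (255)
# wherever the image is to be repainted.
def inpaint_files(image_path, mask_path, out_path="inpainted.png",
                  prompt="perfect skin", negative_prompt="", seed=7):
    source = Image.open(image_path).convert("RGB")
    mask = Image.open(mask_path).convert("L")
    out = generate_image(source, mask, prompt, negative_prompt, sd_pipe, seed)
    out.save(out_path)
    return out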
# Default prompt, negative prompt, and seed used by the Gradio handler.
prompt = "perfect skin"
negative_prompt = ""
seed = 7


def predict(image, mask):
    # Gradio hands images over as numpy arrays; convert to PIL for diffusers.
    image_source_pil = Image.fromarray(image)
    image_mask_pil = Image.fromarray(mask)

    generated_image = generate_image(image=image_source_pil, mask=image_mask_pil,
                                     prompt=prompt, negative_prompt=negative_prompt,
                                     pipe=sd_pipe, seed=seed)
    return generated_image
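# Optional sketch (an assumption, not used by the demo): the segment_anything
# imports above suggest the mask could also come from SAM rather than being
# drawn by hand. "sam_vit_h_4b8939.pth" is a hypothetical local checkpoint
# path; box_xyxy is an [x0, y0, x1, y1] box prompt in pixel coordinates.
def sam_mask_from_box(image_rgb, box_xyxy, checkpoint="sam_vit_h_4b8939.pth"):
    predictor = SamPredictor(build_sam(checkpoint=checkpoint).to(device))
    predictor.set_image(image_rgb)  # HxWx3 uint8 RGB array
    masks, scores, _ = predictor.predict(box=np.array(box_xyxy),
                                         multimask_output=False)
    # Turn the boolean mask into a white-on-black uint8 image for inpainting.
    return Image.fromarray((masks[0] * 255).astype(np.uint8))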
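# To run the demo: execute this script with Python; Gradio serves on port
# 7860 by default. server_name="0.0.0.0" binds to all interfaces, and
# share=True additionally creates a temporary public gradio.live link.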
if __name__ == "__main__":
    # Two image inputs (source photo and mask) and one image output.
    io = gr.Interface(predict, ["image", "image"], "image")
    io.launch(server_name="0.0.0.0", share=True)