File size: 6,619 Bytes
9244e51
 
 
003a57c
 
5043916
0b4a056
 
 
 
dcb1138
dd3f9d3
 
 
 
 
 
 
 
 
 
8fa0841
 
 
dcb1138
 
9244e51
e490d6f
9244e51
 
 
 
 
 
8300e39
9244e51
dcb1138
f0b0540
0435f91
db01d0c
e7d1e40
9244e51
 
 
 
 
 
 
8300e39
e490d6f
dcb1138
 
0b4a056
dcb1138
 
 
 
 
 
 
 
 
 
0b4a056
dcb1138
e490d6f
dcb1138
9244e51
72ff7a6
dcb1138
e490d6f
9244e51
8300e39
 
 
 
 
 
 
9244e51
 
dcb1138
e490d6f
 
 
dcb1138
 
 
 
 
 
 
 
 
e490d6f
 
dcb1138
8fa0841
 
 
99c56f7
e490d6f
9244e51
 
 
c8d4d3d
9244e51
c8d4d3d
9244e51
 
 
 
bbecd0b
9244e51
 
 
 
 
 
 
 
 
 
8300e39
9244e51
2f6f262
884af31
8300e39
 
 
 
 
 
 
f0b0540
0435f91
 
e490d6f
0435f91
8300e39
9244e51
 
 
 
 
 
 
 
8300e39
 
f0b0540
9244e51
 
8300e39
9244e51
 
 
8300e39
 
9244e51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
import gradio as gr
from PIL import Image
import torch
#from diffusers import FluxControlNetModel
#from diffusers.pipelines import FluxControlNetPipeline

from diffusers import DiffusionPipeline

#from diffusers import FluxControlNetPipeline
#from diffusers import FluxControlNetModel #, FluxMultiControlNetModel

"""
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev")
pipe.load_lora_weights("enhanceaiteam/Flux-Uncensored-V2")

prompt = "nsfw nude woman on beach, sunset, long flowing hair, sensual pose"
image = pipe(prompt).images[0]
"""

#import torch.nn.functional as F
#import torchvision
#import torchvision.transforms as T
#import cv2

from diffusers import StableDiffusionInpaintPipeline

import numpy as np
import os
import shutil
from gradio_client import Client, handle_file

# Load the model once globally to avoid repeated loading
"""
def load_inpainting_model():
    # Load pipeline
    #model_path = "urpmv13Inpainting.safetensors"
    model_path = "uberRealisticPornMerge_v23Inpainting.safetensors"
    #model_path = "pornmasterFantasy_v4-inpainting.safetensors"
    #model_path = "pornmasterAmateur_v6Vae-inpainting.safetensors"
    device = "cpu"  # Explicitly use CPU
    pipe = StableDiffusionInpaintPipeline.from_single_file(
        model_path,
        torch_dtype=torch.float32,  # Use float32 for CPU
        safety_checker=None
    ).to(device)
    return pipe
"""
"""
# Load the model once globally to avoid repeated loading
def load_upscaling_model():
    # Load pipeline
    device = "cpu"  # Explicitly use CPU
    controlnet = FluxControlNetModel.from_pretrained(
        "jasperai/Flux.1-dev-Controlnet-Upscaler",
        torch_dtype=torch.float32
    )
    pipe = FluxControlNetPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        controlnet=controlnet,
        torch_dtype=torch.float32
    ).to(device)
    pipe = DiffusionPipeline.from_pretrained("jasperai/Flux.1-dev-Controlnet-Upscaler")    
    return pipe
"""

# Preload the model once
#inpaint_pipeline = load_inpainting_model()
# Preload the model once
#upscale_pipeline = load_upscaling_model()

def resize_image(orig_image):
    """Return *orig_image* upscaled by 10%, preserving its aspect ratio.

    Args:
        orig_image: a PIL Image.

    Returns:
        A new PIL Image resized with LANCZOS resampling.
    """
    aspect_ratio = orig_image.height / orig_image.width
    # Bug fix: Image.resize requires integer dimensions; the original passed
    # floats (width*1.1), which raises TypeError in Pillow.
    new_width = int(round(orig_image.width * 1.1))
    new_height = int(round(new_width * aspect_ratio))
    resized_image = orig_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
    return resized_image

# Scale one image to the exact pixel dimensions of another.
def resize_to_match(input_image, output_image):
    """Resize *output_image* so it matches *input_image*'s dimensions.

    Args:
        input_image: PIL Image whose size is the target.
        output_image: PIL Image to be resized.

    Returns:
        A new PIL Image, bicubic-interpolated to ``input_image.size``.
    """
    target_size = input_image.size
    return output_image.resize(target_size, Image.BICUBIC)

# Ask the hosted Florence-SAM Space for a segmentation mask.
def generate_mask(image_path, text_prompt="clothing"):
    """Generate a segmentation mask for *text_prompt* via the Florence-SAM Space.

    Args:
        image_path: path to the local image file to upload.
        text_prompt: open-vocabulary target to mask (default "clothing").

    Returns:
        Local filesystem path of the mask downloaded by gradio_client.
    """
    sam_client = Client("SkalskiP/florence-sam-masking")
    mask_path = sam_client.predict(
        image_input=handle_file(image_path),
        text_input=text_prompt,
        api_name="/process_image",
    )
    print("mask_result=", mask_path)
    return mask_path

# Save the generated mask
def save_mask(mask_local_path, save_path="generated_mask.png"):
    """Copy the downloaded mask file to *save_path* (best-effort).

    Failures are logged, not raised, so a missing mask does not abort
    the pipeline.

    Args:
        mask_local_path: path of the mask returned by the SAM client.
        save_path: destination path for the copy.
    """
    try:
        shutil.copy(mask_local_path, save_path)
    except OSError as e:
        # Narrowed from bare Exception: copy failures surface as OSError
        # (FileNotFoundError, PermissionError, SameFileError, ...).
        print(f"Failed to save the mask: {e}")

# Function to perform inpainting
"""
def inpaint_image(input_image, mask_image):
    prompt = "undress, naked, real skin, detailed nipples, erect nipples, detailed pussy, (detailed nipples), (detailed skin), (detailed pussy), accurate anatomy"
    negative_prompt = "bad anatomy, deformed, ugly, disfigured, (extra arms), (extra legs), (extra hands), (extra feet), (extra finger)"
    
    #IMAGE_SIZE = (1024,1024)
    #initial_input_image = input_image.resize(IMAGE_SIZE)
    #initial_mask_image = mask_image.resize(IMAGE_SIZE)
    #blurred_mask_image = inpaint_pipeline.mask_processor.blur(initial_mask_image,blur_factor=10)
    #result = inpaint_pipeline(prompt=prompt, negative_prompt=negative_prompt, height=IMAGE_SIZE[0], width=IMAGE_SIZE[0], image=initial_input_image, mask_image=blurred_mask_image, padding_mask_crop=32)
    
    #blurred_mask_image = inpaint_pipeline.mask_processor.blur(mask_image,blur_factor=10)
    result = inpaint_pipeline(prompt=prompt, negative_prompt=negative_prompt, image=input_image, mask_image=mask_image, padding_mask_crop=10)
    inpainted_image = result.images[0]
    #inpainted_image = resize_to_match(input_image, inpainted_image)
    return inpainted_image
"""

# Function to process input image and mask
def process_image(input_image):
    """Run the full pipeline: save input, fetch a clothing mask, upscale it.

    Steps: write the uploaded image to a temp file, request a mask from the
    Florence-SAM Space, copy the mask locally, and return it enlarged by 10%.

    Args:
        input_image: PIL Image uploaded through the Gradio UI.

    Returns:
        PIL Image — the resized mask.
    """
    input_image_path = "temp_input_image.png"
    mask_image_path = "generated_mask.png"
    input_image.save(input_image_path)

    try:
        # Generate the mask using Florence SAM API
        mask_local_path = generate_mask(image_path=input_image_path)

        # Save the generated mask locally
        save_mask(mask_local_path, save_path=mask_image_path)

        # Context manager closes the file handle before we delete the file
        # below (os.remove on an open file fails on Windows).
        with Image.open(mask_image_path) as mask_image:
            result_image = resize_image(mask_image)
    finally:
        # Clean up temporary files even if any pipeline step raised.
        for path in (input_image_path, mask_image_path):
            if os.path.exists(path):
                os.remove(path)

    return result_image

# Build the Gradio UI: paired input/output images plus a run button.
with gr.Blocks() as demo:
    with gr.Row():
        source_image = gr.Image(label="Upload Input Image", type="pil")
        result_display = gr.Image(type="pil", label="Output Image")

    with gr.Row():
        run_button = gr.Button("Run Inpainting")

    # Wire the button to the processing pipeline.
    run_button.click(fn=process_image, inputs=[source_image], outputs=result_display)

# Start the app with a public share link.
demo.launch(share=True)