Update handler.py
handler.py  +12 -1  CHANGED
@@ -1,6 +1,6 @@
 from typing import Dict, List, Any
 import torch
-from diffusers import DPMSolverMultistepScheduler, StableDiffusionInpaintPipeline, AutoPipelineForInpainting, AutoPipelineForImage2Image
+from diffusers import DPMSolverMultistepScheduler, StableDiffusionInpaintPipeline, AutoPipelineForInpainting, AutoPipelineForImage2Image, StableDiffusionXLImg2ImgPipeline
 from PIL import Image
 import base64
 from io import BytesIO
@@ -18,6 +18,12 @@ class EndpointHandler():
         self.fast_pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
         self.generator = torch.Generator(device="cuda").manual_seed(0)

+
+        self.smooth_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
+            "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+        )
+        self.smooth_pipe.to("cuda")
+

         # load StableDiffusionInpaintPipeline pipeline
         self.pipe = AutoPipelineForInpainting.from_pretrained(
@@ -76,6 +82,11 @@ class EndpointHandler():

         return image

+        if(method == "smooth"):
+            image = self.smooth_pipe(prompt, image=image).images[0]
+
+            return image
+
         #pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")

         self.pipe.enable_xformers_memory_efficient_attention()
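For reference, a minimal sketch of how the new "smooth" branch could be exercised once this change lands. The constructor argument and the payload keys ("inputs", "image", "method") are assumptions inferred from the handler's imports and the new method == "smooth" check; this commit does not spell out the request schema.

    import base64

    from handler import EndpointHandler

    # Hypothetical invocation of the custom endpoint handler; the payload
    # schema ("inputs", "image", "method") is assumed, not defined in this diff.
    handler = EndpointHandler(path=".")

    with open("input.png", "rb") as f:
        image_b64 = base64.b64encode(f.read()).decode("utf-8")

    result = handler({
        "inputs": "a calm mountain lake at sunrise",  # prompt passed to the refiner
        "image": image_b64,                           # init image for img2img
        "method": "smooth",                           # routes to self.smooth_pipe
    })

    # The "smooth" branch returns the refined PIL image directly.
    result.save("refined.png")

The refiner loaded here (stabilityai/stable-diffusion-xl-refiner-1.0) is an image-to-image pipeline, so the branch presumably exists to pass an already-generated image back through it for an extra denoising pass.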