sg0420 committed on
Commit 5359785 · 1 Parent(s): fa3cbdd

Update handler.py

Files changed (1)
  1. handler.py +49 -0
handler.py CHANGED
@@ -0,0 +1,49 @@
+ from typing import Any, Dict
+ import base64
+ from io import BytesIO
+
+ import torch
+ from torch import autocast
+ from diffusers import LCMScheduler, StableDiffusionPipeline
+
+ # set device
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+ if device.type != 'cuda':
+     raise ValueError("need to run on GPU")
+
+ class EndpointHandler():
+     def __init__(self, path=""):
+         # load the optimized model in half precision
+         self.pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
+         self.pipe = self.pipe.to(device)
+
+         # swap in the LCM scheduler and fuse the LCM-LoRA weights
+         # so the pipeline can generate images in very few steps
+         lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5"
+         self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
+         self.pipe.load_lora_weights(lcm_lora_id)
+         self.pipe.fuse_lora()
+
+     def __call__(self, data: Any) -> Dict[str, str]:
+         """
+         Args:
+             data (:obj:`dict`):
+                 includes the input data and the parameters for the inference.
+         Return:
+             A :obj:`dict` containing the base64-encoded image.
+         """
+         inputs = data.pop("inputs", data)
+
+         # run the inference pipeline; guidance_scale=1 and 4 steps follow the LCM-LoRA recipe
+         with torch.inference_mode():
+             with autocast(device.type):
+                 image = self.pipe(inputs, guidance_scale=1, num_inference_steps=4).images[0]
+
+         # encode the image as base64
+         buffered = BytesIO()
+         image.save(buffered, format="JPEG")
+         img_str = base64.b64encode(buffered.getvalue())
+
+         # postprocess the prediction
+         return {"image": img_str.decode()}