from typing import Any, Dict
import base64
from io import BytesIO

import torch
from diffusers import LCMScheduler, StableDiffusionPipeline

# set device; this handler requires a CUDA GPU for fp16 inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if device.type != "cuda":
    raise ValueError("need to run on GPU")


class EndpointHandler:
    def __init__(self, path: str = ""):
        # load the optimized fp16 model and move it to the GPU
        self.pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
        self.pipe = self.pipe.to(device)

        # swap in the LCM scheduler and fuse the LCM-LoRA weights so the
        # pipeline produces usable images in only a few denoising steps
        lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5"
        self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.load_lora_weights(lcm_lora_id)
        self.pipe.fuse_lora()

    def __call__(self, data: Any) -> Dict[str, str]:
        """
        Args:
            data (:obj:`dict`): includes the input data and the parameters for the inference.
        Return:
            A :obj:`dict` with the base64-encoded image under the "image" key.
        """
        inputs = data.pop("inputs", data)

        # run inference pipeline; guidance_scale=1 disables classifier-free
        # guidance, which LCM-LoRA expects at low step counts
        with torch.inference_mode():
            with torch.autocast(device.type):
                image = self.pipe(inputs, guidance_scale=1, num_inference_steps=4).images[0]

        # encode image as base64
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue())

        # postprocess the prediction
        return {"image": img_str.decode()}
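

# Usage sketch (not part of the handler contract): a minimal local smoke
# test, assuming a hypothetical model directory "./model" containing a
# Stable Diffusion v1-5 checkpoint. Hugging Face Inference Endpoints
# instantiate and call EndpointHandler in the same way at request time.
if __name__ == "__main__":
    handler = EndpointHandler(path="./model")
    result = handler({"inputs": "a photo of an astronaut riding a horse"})

    # decode the base64 payload back into JPEG bytes for inspection
    with open("out.jpg", "wb") as f:
        f.write(base64.b64decode(result["image"]))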