from typing import Any, Dict

import base64
from io import BytesIO

import torch
from torch import autocast
from diffusers import AutoPipelineForText2Image

# set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type != "cuda":
    raise ValueError("need to run on GPU")


class EndpointHandler:
    def __init__(self, path: str = ""):
        # load the SDXL Turbo base pipeline in fp16
        self.pipe = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16"
        )
        # apply the fine-tuned LoRA weights on top of the base model
        self.pipe.load_lora_weights(
            "ppierzc/sd-icongenie-model-lora-sdxl",
            weight_name="pytorch_lora_weights.safetensors",
        )
        self.pipe = self.pipe.to(device)

    def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
        """
        Args:
            data (:obj:`dict`):
                includes the input data and the parameters for the inference.
        Return:
            A :obj:`dict` containing the base64-encoded image.
        """
        inputs = data.pop("inputs", data)

        # run the inference pipeline; SDXL Turbo generates in a single step with no guidance
        with autocast(device.type):
            image = self.pipe(inputs, num_inference_steps=1, guidance_scale=0.0).images[0]

        # encode the generated PIL image as a base64 JPEG
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue())

        # postprocess the prediction
        return {"image": img_str.decode()}
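

# --- Usage sketch (illustrative, not part of the handler contract) ---
# A minimal local smoke test, assuming this file is saved as handler.py on a
# CUDA machine with torch, diffusers, and Pillow installed. The prompt string
# and the output filename "icon.jpg" are placeholders chosen for this example;
# in production, Hugging Face Inference Endpoints invokes EndpointHandler.__call__
# with the request payload, so this block never runs there.
if __name__ == "__main__":
    from PIL import Image

    handler = EndpointHandler()
    response = handler({"inputs": "a flat vector icon of a rocket ship"})

    # the handler returns {"image": <base64-encoded JPEG>}; decode it back to a PIL image
    image_bytes = base64.b64decode(response["image"])
    Image.open(BytesIO(image_bytes)).save("icon.jpg")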