import torch
from PIL.Image import Image
from diffusers import StableDiffusionXLPipeline, LCMScheduler
from pipelines.models import TextToImageRequest
from torch import Generator
def load_pipeline() -> StableDiffusionXLPipeline:
    # Load the base SDXL checkpoint from the local model directory in fp16.
    pipeline: StableDiffusionXLPipeline = StableDiffusionXLPipeline.from_pretrained(
        "./models/newdream-sdxl-20",
        torch_dtype=torch.float16,
        local_files_only=True,
    ).to("cuda")

    adapter_id = "./models/lora"

    # Swap in an LCM scheduler so the pipeline can produce usable images in
    # only a handful of denoising steps.
    pipeline.scheduler = LCMScheduler.from_config({
        "_class_name": "LCMScheduler",
        "_diffusers_version": "0.29.0",
        "beta_end": 0.012,
        "beta_schedule": "scaled_linear",
        "beta_start": 0.00085,
        "clip_sample": False,
        "clip_sample_range": 1.0,
        "num_train_timesteps": 1000,
        "original_inference_steps": 300,
        "prediction_type": "epsilon",
        "rescale_betas_zero_snr": False,
        "sample_max_value": 1.0,
        "set_alpha_to_one": False,
        "steps_offset": 1,
        "thresholding": False,
        "timestep_scaling": 10.0,
        "timestep_spacing": "leading",
        "trained_betas": None,
    })
    # Alternative: LCMScheduler.from_config(pipeline.scheduler.config)

    # Apply the LoRA adapter on top of the base weights.
    pipeline.load_lora_weights(adapter_id)

    # Warm-up call so CUDA initialization and kernel compilation happen here
    # rather than during the first real request.
    pipeline(prompt="")

    return pipeline


def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
    # Compare against None explicitly so an explicit seed of 0 is still honored.
    generator = (
        Generator(pipeline.device).manual_seed(request.seed)
        if request.seed is not None
        else None
    )

    return pipeline(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=4,
        guidance_scale=1.5,
    ).images[0]
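

# A minimal usage sketch (not part of the original entry points): load the
# pipeline once, then serve a request. The keyword arguments accepted by the
# TextToImageRequest constructor are an assumption; only the attributes read
# in infer() (prompt, negative_prompt, width, height, seed) are known.
if __name__ == "__main__":
    sdxl_pipeline = load_pipeline()
    sample_request = TextToImageRequest(
        prompt="a watercolor lighthouse at dawn",
        negative_prompt=None,
        width=1024,
        height=1024,
        seed=42,
    )
    infer(sample_request, sdxl_pipeline).save("sample_output.png")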