import torch
from PIL.Image import Image
from diffusers import StableDiffusionXLPipeline, LCMScheduler
from pipelines.models import TextToImageRequest
from torch import Generator
def load_pipeline() -> StableDiffusionXLPipeline:
    """Load the SDXL pipeline in fp16 on CUDA, swap in an LCM scheduler,
    attach the LoRA adapter, and run one warm-up inference.

    Returns:
        A ready-to-use ``StableDiffusionXLPipeline`` on the CUDA device.
    """
    pipeline: StableDiffusionXLPipeline = StableDiffusionXLPipeline.from_pretrained(
        "./models/newdream-sdxl-20",
        torch_dtype=torch.float16,
        local_files_only=True,  # offline: all weights must already be on disk
    ).to("cuda")
    adapter_id = "./models/lora"
    # Explicit LCM scheduler config (rather than deriving it from the base
    # model's scheduler config) so the few-step sampling used by infer() is
    # pinned to these exact values.
    pipeline.scheduler = LCMScheduler.from_config({
        "_class_name": "LCMScheduler",
        "_diffusers_version": "0.29.0",
        "beta_end": 0.012,
        "beta_schedule": "scaled_linear",
        "beta_start": 0.00085,
        "clip_sample": False,
        "clip_sample_range": 1.0,
        "num_train_timesteps": 1000,
        "original_inference_steps": 300,
        "prediction_type": "epsilon",
        "rescale_betas_zero_snr": False,
        "sample_max_value": 1.0,
        "set_alpha_to_one": False,
        "steps_offset": 1,
        "thresholding": False,
        "timestep_scaling": 10.0,
        "timestep_spacing": "leading",
        "trained_betas": None,
    })
    pipeline.load_lora_weights(adapter_id)
    # Warm-up pass: triggers CUDA kernel compilation / memory allocation once
    # at load time so the first real request is not slowed down.
    pipeline(prompt="")
    return pipeline
def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
    """Generate one image for *request* with the pre-loaded LCM pipeline.

    Args:
        request: Prompt, negative prompt, dimensions, and optional seed.
        pipeline: Pipeline prepared by :func:`load_pipeline`.

    Returns:
        The first generated PIL image.
    """
    # Check `is not None` rather than truthiness: a seed of 0 is a valid,
    # reproducible seed and must not be silently dropped.
    if request.seed is not None:
        generator = Generator(pipeline.device).manual_seed(request.seed)
    else:
        generator = None
    return pipeline(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=4,   # few-step sampling enabled by the LCM LoRA
        guidance_scale=1.5,      # low CFG, as recommended for LCM inference
    ).images[0]