import argparse, glob, os

import torch
import numpy as np
from tqdm import tqdm
from PIL import Image
from omegaconf import OmegaConf
from einops import repeat, rearrange
from pytorch_lightning import seed_everything

from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.ddpm import LatentUpscaleDiffusion, LatentUpscaleFinetuneDiffusion
from ldm.util import exists, instantiate_from_config


# Inference-only script: sampling never needs gradients.
torch.set_grad_enabled(False)


def load_model_from_config(config, ckpt, verbose=False):
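    """Instantiate the model described by `config` and load checkpoint weights from `ckpt`."""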
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    # Move to GPU only when one is available, so the script also runs on CPU.
    if torch.cuda.is_available():
        model.cuda()
    model.eval()
    return model


def make_batch_sd(image, txt, device, num_samples=1, size=(512, 512)):
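    """Open `image`, resize it to `size`, and build the low-res/text conditioning batch."""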
    image = Image.open(image).convert("RGB")
    image = image.resize(size)  # note: PIL's resize takes (width, height)
    image = np.array(image)
    image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0  # scale to [-1, 1]
    batch = {
        "lr": rearrange(image, 'h w c -> 1 c h w'),
        "txt": num_samples * [txt],
    }
    batch["lr"] = repeat(batch["lr"].to(device=device), "1 ... -> n ...", n=num_samples)
    return batch


def make_noise_augmentation(model, batch, noise_level=None):
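    """Apply the model's low-scale noise augmentation to the low-res conditioning image."""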
    x_low = batch[model.low_scale_key]
    x_low = x_low.to(memory_format=torch.contiguous_format).float()
    x_aug, noise_level = model.low_scale_model(x_low, noise_level)
    return x_aug, noise_level


def paint(sampler, image, prompt, seed, scale, h, w, steps, num_samples=1, callback=None, eta=0., noise_level=None):
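    """Run DDIM upscaling for one input image and return the first decoded sample as a PIL image."""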
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = sampler.model
    seed_everything(seed)
    prng = np.random.RandomState(seed)
    start_code = prng.randn(num_samples, model.channels, h, w)
    start_code = torch.from_numpy(start_code).to(device=device, dtype=torch.float32)

    with torch.no_grad(), torch.autocast("cuda"):
        # PIL's resize expects (width, height), so the (h, w) arguments are swapped here.
        batch = make_batch_sd(image, txt=prompt, device=device, num_samples=num_samples, size=(w, h))
        c = model.cond_stage_model.encode(batch["txt"])
        c_cat = list()
        if isinstance(model, LatentUpscaleFinetuneDiffusion):
            # Concat-conditioning: stack the conditioning inputs channel-wise.
            for ck in model.concat_keys:
                cc = batch[ck]
                if exists(model.reshuffle_patch_size):
                    assert isinstance(model.reshuffle_patch_size, int)
                    cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w',
                                   p1=model.reshuffle_patch_size, p2=model.reshuffle_patch_size)
                c_cat.append(cc)
            c_cat = torch.cat(c_cat, dim=1)

            cond = {"c_concat": [c_cat], "c_crossattn": [c]}

            # Unconditional (empty-prompt) conditioning for classifier-free guidance.
            uc_cross = model.get_unconditional_conditioning(num_samples, "")
            uc_full = {"c_concat": [c_cat], "c_crossattn": [uc_cross]}
        elif isinstance(model, LatentUpscaleDiffusion):
            # Noise-augment the low-res image and pass the noise level as extra conditioning.
            x_augment, noise_level = make_noise_augmentation(model, batch, noise_level)
            cond = {"c_concat": [x_augment], "c_crossattn": [c], "c_adm": noise_level}

            uc_cross = model.get_unconditional_conditioning(num_samples, "")
            uc_full = {"c_concat": [x_augment], "c_crossattn": [uc_cross], "c_adm": noise_level}
        else:
            raise NotImplementedError()

        shape = [model.channels, h, w]
        samples, intermediates = sampler.sample(
            steps,
            num_samples,
            shape,
            cond,
            verbose=False,
            eta=eta,
            unconditional_guidance_scale=scale,
            unconditional_conditioning=uc_full,
            x_T=start_code,
            callback=callback
        )
    with torch.no_grad():
        x_samples_ddim = model.decode_first_stage(samples)
        result = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
        result = result.cpu().numpy().transpose(0, 2, 3, 1) * 255
    # Only the first of `num_samples` results is returned.
    return Image.fromarray(result.astype(np.uint8)[0])


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--indir", type=str, nargs="?", help="dir containing the input images")
    parser.add_argument("--num_imgs", type=int, default=None, help="maximum number of images to process")
    parser.add_argument("--steps", type=int, default=50, help="number of ddim sampling steps")
    parser.add_argument("--config", type=str, default="/checkpoint/pfz/autoencoders/sd/stable-diffusion-x4-upscaler/x4-upscaling.yaml", help="path to config which constructs model")
    parser.add_argument("--ckpt", type=str, default="/checkpoint/pfz/autoencoders/sd/stable-diffusion-x4-upscaler/x4-upscaler-ema.ckpt", help="path to checkpoint of model")
    parser.add_argument("--ldm_decoder_ckpt", default=None, type=str, help="path to checkpoint of LDM decoder")
    parser.add_argument("--num_samples", default=1, type=int, help="number of samples to generate per image")
    parser.add_argument("--scale", default=10.0, type=float, help="unconditional guidance scale")
    parser.add_argument("--eta", default=0.0, type=float, help="ddim eta (0.0 corresponds to deterministic sampling)")
    parser.add_argument("--noise_level", default=20, type=float, help="noise level for the low-res conditioning augmentation")
    parser.add_argument("--output_dir", type=str, default="outputs", nargs="?", help="dir to write results to")
    parser.add_argument("--height", type=int, default=512, help="height of output image")
    parser.add_argument("--width", type=int, default=512, help="width of output image")
    parser.add_argument("--seed", type=int, default=0, help="random seed")
    opt = parser.parse_args()
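    # Example invocation (the script filename and directory names here are illustrative):
    #   python upscale.py --indir ./lowres --output_dir ./outputs --steps 50 --noise_level 20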

    print(f'>>> Building LDM model with config {opt.config} and weights from {opt.ckpt}...')
    config = OmegaConf.load(f"{opt.config}")
    model = load_model_from_config(config, f"{opt.ckpt}")

    # Optionally swap in fine-tuned decoder weights for the first-stage autoencoder.
    if opt.ldm_decoder_ckpt is not None and opt.ldm_decoder_ckpt.lower() == "none":
        opt.ldm_decoder_ckpt = None
    if opt.ldm_decoder_ckpt is not None:
        print(f'reload decoder weights {opt.ldm_decoder_ckpt}...')
        state_dict = torch.load(opt.ldm_decoder_ckpt)['ldm_decoder']
        msg = model.first_stage_model.load_state_dict(state_dict, strict=False)
        print(msg)

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = model.to(device)
    model.eval()
    sampler = DDIMSampler(model)

    os.makedirs(opt.output_dir, exist_ok=True)

    # Collect input images; sorted(set(...)) avoids duplicates on case-insensitive filesystems.
    extensions = ("*.png", "*.jpg", "*.jpeg", "*.PNG", "*.JPG", "*.JPEG")
    images = sorted(set(sum((glob.glob(os.path.join(opt.indir, ext)) for ext in extensions), [])))
    print(f"Found {len(images)} inputs.")

    counter = 0
    for image in tqdm(images):
        if opt.num_imgs is not None and counter >= opt.num_imgs:
            break
        noise_level = torch.Tensor(opt.num_samples * [opt.noise_level]).to(sampler.model.device).long()
        sampler.make_schedule(opt.steps, ddim_eta=opt.eta, verbose=True)
        result = paint(
            sampler=sampler,
            image=image,
            prompt="",
            seed=opt.seed,
            scale=opt.scale,
            h=opt.height, w=opt.width, steps=opt.steps,
            num_samples=opt.num_samples,
            callback=None,
            noise_level=noise_level
        )
        # Always write a PNG, whatever the input extension was.
        base, _ = os.path.splitext(os.path.basename(image))
        outpath = os.path.join(opt.output_dir, base + ".png")
        result.save(outpath)
        counter += 1