import os

# Clone the CLOOB latent diffusion repo (with submodules) and install its dependencies.
os.system("git clone --recursive https://github.com/JD-P/cloob-latent-diffusion")
os.system("cd cloob-latent-diffusion; pip install omegaconf pillow pytorch-lightning einops wandb ftfy regex ./CLIP")

import argparse
import random
import sys
from functools import partial
from pathlib import Path

# Make the cloned repo and its bundled submodules importable.
sys.path.append('./cloob-latent-diffusion')
sys.path.append('./cloob-latent-diffusion/cloob-training')
sys.path.append('./cloob-latent-diffusion/latent-diffusion')
sys.path.append('./cloob-latent-diffusion/taming-transformers')
sys.path.append('./cloob-latent-diffusion/v-diffusion-pytorch')

from omegaconf import OmegaConf
from PIL import Image
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
from tqdm import trange

from CLIP import clip
from cloob_training import model_pt, pretrained
import ldm.models.autoencoder
from diffusion import sampling, utils
import train_latent_diffusion as train

from huggingface_hub import hf_hub_url, cached_download

# Download the model files.
# (cached_download/hf_hub_url are deprecated in recent huggingface_hub releases;
# hf_hub_download supersedes them.)
checkpoint = cached_download(hf_hub_url("huggan/distill-ccld-wa", filename="model_student.ckpt"))
ae_model_path = cached_download(hf_hub_url("huggan/ccld_wa", filename="ae_model.ckpt"))
ae_config_path = cached_download(hf_hub_url("huggan/ccld_wa", filename="ae_model.yaml"))


# Define a few utility functions

def parse_prompt(prompt, default_weight=3.):
    """Split a 'text:weight' prompt into (text, weight), leaving URL scheme colons intact."""
    if prompt.startswith('http://') or prompt.startswith('https://'):
        vals = prompt.rsplit(':', 2)
        vals = [vals[0] + ':' + vals[1], *vals[2:]]
    else:
        vals = prompt.rsplit(':', 1)
    vals = vals + ['', default_weight][len(vals):]
    return vals[0], float(vals[1])
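# Illustrative usage of parse_prompt (added example, not part of the original script).
# A prompt may carry an optional ':weight' suffix; for image URLs the scheme colon is
# preserved and only a trailing ':weight' is split off. The example strings below are
# hypothetical and serve only to show the expected (text, weight) outputs.
assert parse_prompt('a watercolor landscape') == ('a watercolor landscape', 3.0)
assert parse_prompt('a watercolor landscape:1.5') == ('a watercolor landscape', 1.5)
assert parse_prompt('https://example.com/image.png:2') == ('https://example.com/image.png', 2.0)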