diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..7c2df37d12c11576fcb81a51221ec37d22f2352a --- /dev/null +++ b/.gitignore @@ -0,0 +1,172 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints +*/.ipynb_checkpoints/* + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# .idea +.idea/ +/idea/ +*.ipr +*.iml +*.iws + +# macos system +.DS_Store + +### project ### + +# /diffvg/ +# big-lama* + +# pytorch-lighting logs +lightning_logs/* + +# Edit settings +.editorconfig + +# model checkpoint +/checkpoint/u2net/u2net.pth +!/checkpoint/placeholder.md + +# ignore local results +/workspace/ +.workspace/ + +# ignore files +./tmp/ +./tmp/* +/tmp/ +/tmp_select/ +/tmp_select/* \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..1a42887846f4bc554d92398750dc28bb97542cbf --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "diffvg"] + path = diffvg + url = https://github.com/BachiLi/diffvg.git diff --git a/ImageReward/ImageReward.py b/ImageReward/ImageReward.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcd9a1216ba000c778069380ee77a9f4dabd28b --- /dev/null +++ b/ImageReward/ImageReward.py @@ -0,0 +1,177 @@ +''' +@File : ImageReward.py +@Time : 2023/01/28 19:53:00 +@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +@Description: ImageReward Reward model. 
+* Based on CLIP code base and improved-aesthetic-predictor code base +* https://github.com/openai/CLIP +* https://github.com/christophschuhmann/improved-aesthetic-predictor +''' + +import os +import torch +import torch.nn as nn +from PIL import Image +from .models.BLIP.blip_pretrain import BLIP_Pretrain +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize + +try: + from torchvision.transforms import InterpolationMode + + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +def _convert_image_to_rgb(image): + return image.convert("RGB") + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + _convert_image_to_rgb, + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +class MLP(nn.Module): + def __init__(self, input_size): + super().__init__() + self.input_size = input_size + + self.layers = nn.Sequential( + nn.Linear(self.input_size, 1024), + # nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(1024, 128), + # nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(128, 64), + # nn.ReLU(), + nn.Dropout(0.1), + nn.Linear(64, 16), + # nn.ReLU(), + nn.Linear(16, 1) + ) + + # initial MLP param + for name, param in self.layers.named_parameters(): + if 'weight' in name: + nn.init.normal_(param, mean=0.0, std=1.0 / (self.input_size + 1)) + if 'bias' in name: + nn.init.constant_(param, val=0) + + def forward(self, input): + return self.layers(input) + + +class ImageReward(nn.Module): + def __init__(self, med_config, device='cpu'): + super().__init__() + self.device = device + + self.blip = BLIP_Pretrain(image_size=224, vit='large', med_config=med_config) + self.preprocess = _transform(224) + self.mlp = MLP(768) + + self.mean = 0.16717362830052426 + self.std = 1.0333394966054072 + + def score_gard(self, prompt_ids, prompt_attention_mask, image): + + image_embeds = self.blip.visual_encoder(image) + # text encode cross attention with image + image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(self.device) + text_output = self.blip.text_encoder(prompt_ids, + attention_mask=prompt_attention_mask, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + + txt_features = text_output.last_hidden_state[:, 0, :] # (feature_dim) + rewards = self.mlp(txt_features) + rewards = (rewards - self.mean) / self.std + + return rewards + + def score(self, prompt, image): + + if (type(image).__name__ == 'list'): + _, rewards = self.inference_rank(prompt, image) + return rewards + + # text encode + text_input = self.blip.tokenizer(prompt, padding='max_length', truncation=True, max_length=35, + return_tensors="pt").to(self.device) + + # image encode + if isinstance(image, Image.Image): + pil_image = image + elif isinstance(image, str): + if os.path.isfile(image): + pil_image = Image.open(image) + else: + raise TypeError( + r'This image parameter type has not been supportted yet. 
Please pass PIL.Image or file path str.') + + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_embeds = self.blip.visual_encoder(image) + + # text encode cross attention with image + image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(self.device) + text_output = self.blip.text_encoder(text_input.input_ids, + attention_mask=text_input.attention_mask, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + + txt_features = text_output.last_hidden_state[:, 0, :].float() # (feature_dim) + rewards = self.mlp(txt_features) + rewards = (rewards - self.mean) / self.std + + return rewards.detach().cpu().numpy().item() + + def inference_rank(self, prompt, generations_list): + + text_input = self.blip.tokenizer(prompt, padding='max_length', truncation=True, max_length=35, + return_tensors="pt").to(self.device) + + txt_set = [] + for generation in generations_list: + # image encode + if isinstance(generation, Image.Image): + pil_image = generation + elif isinstance(generation, str): + if os.path.isfile(generation): + pil_image = Image.open(generation) + else: + raise TypeError( + r'This image parameter type has not been supportted yet. Please pass PIL.Image or file path str.') + + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_embeds = self.blip.visual_encoder(image) + + # text encode cross attention with image + image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(self.device) + text_output = self.blip.text_encoder(text_input.input_ids, + attention_mask=text_input.attention_mask, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True) + txt_set.append(text_output.last_hidden_state[:, 0, :]) + + txt_features = torch.cat(txt_set, 0).float() # [image_num, feature_dim] + rewards = self.mlp(txt_features) # [image_num, 1] + rewards = (rewards - self.mean) / self.std + rewards = torch.squeeze(rewards) + _, rank = torch.sort(rewards, dim=0, descending=True) + _, indices = torch.sort(rank, dim=0) + indices = indices + 1 + + return indices.detach().cpu().numpy().tolist(), rewards.detach().cpu().numpy().tolist() diff --git a/ImageReward/ReFL.py b/ImageReward/ReFL.py new file mode 100644 index 0000000000000000000000000000000000000000..d8718b3b5e0cf605219b3931f20b043bcd67c6c9 --- /dev/null +++ b/ImageReward/ReFL.py @@ -0,0 +1,830 @@ +''' +@File : ReFL.py +@Time : 2023/05/01 19:36:00 +@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +@Description: ReFL Algorithm. 
+* Based on diffusers code base +* https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py +''' + +import argparse +import logging +import math +import os +import random +from pathlib import Path + +import accelerate +import numpy as np +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +import transformers +from accelerate import Accelerator +from accelerate.logging import get_logger +from accelerate.utils import ProjectConfiguration, set_seed +from datasets import load_dataset +from huggingface_hub import create_repo, upload_folder +from packaging import version +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer + +from PIL import Image +import ImageReward as RM + +from torchvision.transforms import Compose, Resize, CenterCrop, Normalize + +try: + from torchvision.transforms import InterpolationMode + + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + +import diffusers +from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.training_utils import EMAModel +from diffusers.utils import check_min_version, deprecate +from diffusers.utils.import_utils import is_xformers_available + +# Will error if the minimal version of diffusers is not installed. Remove at your own risks. +check_min_version("0.16.0.dev0") + +logger = get_logger(__name__, log_level="INFO") + +DATASET_NAME_MAPPING = { + "refl": ("image", "text"), +} + + +def parse_args(): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument( + "--grad_scale", type=float, default=1e-3, help="Scale divided for grad loss value." + ) + parser.add_argument( + "--input_pertubation", type=float, default=0, help="The scale of input pretubation. Recommended 0.1." + ) + parser.add_argument( + "--revision", + type=str, + default=None, + required=False, + help="Revision of pretrained model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--dataset_name", + type=str, + default=None, + help=( + "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," + " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," + " or to a folder containing files that 🤗 Datasets can understand." + ), + ) + parser.add_argument( + "--dataset_config_name", + type=str, + default=None, + help="The config of the Dataset, leave as None if there's only one config.", + ) + parser.add_argument( + "--image_column", type=str, default="image", help="The column of the dataset containing an image." + ) + parser.add_argument( + "--caption_column", + type=str, + default="text", + help="The column of the dataset containing a caption or a list of captions.", + ) + parser.add_argument( + "--max_train_samples", + type=int, + default=None, + help=( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." 
+ ), + ) + parser.add_argument( + "--validation_prompts", + type=str, + default=None, + nargs="+", + help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), + ) + parser.add_argument( + "--output_dir", + type=str, + default="checkpoint/refl", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="The directory where the downloaded models and datasets will be stored.", + ) + parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") + parser.add_argument( + "--resolution", + type=int, + default=512, + help=( + "The resolution for input images, all the images in the train/validation dataset will be resized to this" + " resolution" + ), + ) + parser.add_argument( + "--center_crop", + default=False, + action="store_true", + help=( + "Whether to center crop the input images to the resolution. If not set, the images will be randomly" + " cropped. The images will be resized to the resolution first before cropping." + ), + ) + parser.add_argument( + "--random_flip", + action="store_true", + help="whether to randomly flip images horizontally", + ) + parser.add_argument( + "--train_batch_size", type=int, default=2, help="Batch size (per device) for the training dataloader." + ) + parser.add_argument("--num_train_epochs", type=int, default=100) + parser.add_argument( + "--max_train_steps", + type=int, + default=100, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=4, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--gradient_checkpointing", + action="store_true", + help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=1e-5, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + default=False, + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup"]' + ), + ) + parser.add_argument( + "--lr_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument( + "--snr_gamma", + type=float, + default=None, + help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " + "More details here: https://arxiv.org/abs/2303.09556.", + ) + parser.add_argument( + "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." + ) + parser.add_argument( + "--allow_tf32", + action="store_true", + help=( + "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see" + " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" + ), + ) + parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") + parser.add_argument( + "--non_ema_revision", + type=str, + default=None, + required=False, + help=( + "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" + " remote repository specified with --pretrained_model_name_or_path." + ), + ) + parser.add_argument( + "--dataloader_num_workers", + type=int, + default=0, + help=( + "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." + ), + ) + parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") + parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") + parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") + parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--hub_model_id", + type=str, + default=None, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help=( + "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" + " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." + ), + ) + parser.add_argument( + "--mixed_precision", + type=str, + default=None, + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" + " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" + " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." + ), + ) + parser.add_argument( + "--report_to", + type=str, + default="tensorboard", + help=( + 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' + ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' + ), + ) + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument( + "--checkpointing_steps", + type=int, + default=100, + help=( + "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" + " training using `--resume_from_checkpoint`." + ), + ) + parser.add_argument( + "--checkpoints_total_limit", + type=int, + default=None, + help=( + "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." + " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" + " for more docs" + ), + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help=( + "Whether training should be resumed from a previous checkpoint. 
Use a path saved by" + ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' + ), + ) + parser.add_argument( + "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." + ) + parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") + parser.add_argument( + "--validation_epochs", + type=int, + default=5, + help="Run validation every X epochs.", + ) + parser.add_argument( + "--tracker_project_name", + type=str, + default="text2image-refl", + help=( + "The `project_name` argument passed to Accelerator.init_trackers for" + " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" + ), + ) + + args = parser.parse_args() + env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) + if env_local_rank != -1 and env_local_rank != args.local_rank: + args.local_rank = env_local_rank + + # default to using the same revision for the non-ema model if not specified + if args.non_ema_revision is None: + args.non_ema_revision = args.revision + + return args + + +class Trainer(object): + + def __init__(self, pretrained_model_name_or_path, train_data_dir, args): + + self.pretrained_model_name_or_path = pretrained_model_name_or_path + self.train_data_dir = train_data_dir + + # Sanity checks + if args.dataset_name is None and self.train_data_dir is None: + raise ValueError("Need either a dataset name or a training folder.") + + if args.non_ema_revision is not None: + deprecate( + "non_ema_revision!=None", + "0.15.0", + message=( + "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" + " use `--variant=non_ema` instead." + ), + ) + logging_dir = os.path.join(args.output_dir, args.logging_dir) + + accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit) + + self.accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision, + log_with=args.report_to, + logging_dir=logging_dir, + project_config=accelerator_project_config, + ) + + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(self.accelerator.state, main_process_only=False) + if self.accelerator.is_local_main_process: + transformers.utils.logging.set_verbosity_warning() + diffusers.utils.logging.set_verbosity_info() + else: + transformers.utils.logging.set_verbosity_error() + diffusers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. + if args.seed is not None: + set_seed(args.seed) + + # Handle the repository creation + if self.accelerator.is_main_process: + if args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + if args.push_to_hub: + self.repo_id = create_repo( + repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token + ).repo_id + + # Load scheduler, tokenizer and models. 
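+ # Load the Stable Diffusion components (scheduler, tokenizer, text encoder, VAE, UNet)
+ # plus the ImageReward model; everything except the UNet is frozen below, since ReFL
+ # only backpropagates the reward signal into the UNet.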
+ self.noise_scheduler = DDPMScheduler.from_pretrained(self.pretrained_model_name_or_path, subfolder="scheduler") + tokenizer = CLIPTokenizer.from_pretrained( + self.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision + ) + self.text_encoder = CLIPTextModel.from_pretrained( + self.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision + ) + self.vae = AutoencoderKL.from_pretrained(self.pretrained_model_name_or_path, subfolder="vae", + revision=args.revision) + self.unet = UNet2DConditionModel.from_pretrained( + self.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision + ) + self.reward_model = RM.load("ImageReward-v1.0", device=self.accelerator.device) + + # Freeze vae and text_encoder + self.vae.requires_grad_(False) + self.text_encoder.requires_grad_(False) + self.reward_model.requires_grad_(False) + + # Create EMA for the unet. + if args.use_ema: + self.ema_unet = UNet2DConditionModel.from_pretrained( + self.pretrained_model_name_or_path, subfolder="unet", revision=args.revision + ) + self.ema_unet = EMAModel(self.ema_unet.parameters(), model_cls=UNet2DConditionModel, + model_config=self.ema_unet.config) + + if args.enable_xformers_memory_efficient_attention: + if is_xformers_available(): + import xformers + + xformers_version = version.parse(xformers.__version__) + if xformers_version == version.parse("0.0.16"): + logger.warn( + "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." + ) + self.unet.enable_xformers_memory_efficient_attention() + else: + raise ValueError("xformers is not available. 
Make sure it is installed correctly") + + # `accelerate` 0.16.0 will have better support for customized saving + if version.parse(accelerate.__version__) >= version.parse("0.16.0"): + # create custom saving & loading hooks so that `self.accelerator.save_state(...)` serializes in a nice format + def save_model_hook(models, weights, output_dir): + if args.use_ema: + self.ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) + + for i, model in enumerate(models): + model.save_pretrained(os.path.join(output_dir, "unet")) + + # make sure to pop weight so that corresponding model is not saved again + weights.pop() + + def load_model_hook(models, input_dir): + if args.use_ema: + load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) + self.ema_unet.load_state_dict(load_model.state_dict()) + self.ema_unet.to(self.accelerator.device) + del load_model + + for i in range(len(models)): + # pop models so that they are not loaded again + model = models.pop() + + # load diffusers style into model + load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") + model.register_to_config(**load_model.config) + + model.load_state_dict(load_model.state_dict()) + del load_model + + self.accelerator.register_save_state_pre_hook(save_model_hook) + self.accelerator.register_load_state_pre_hook(load_model_hook) + + if args.gradient_checkpointing: + self.unet.enable_gradient_checkpointing() + + # Enable TF32 for faster training on Ampere GPUs, + # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices + if args.allow_tf32: + torch.backends.cuda.matmul.allow_tf32 = True + + if args.scale_lr: + args.learning_rate = ( + args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * self.accelerator.num_processes + ) + + # Initialize the optimizer + if args.use_8bit_adam: + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError( + "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" + ) + + optimizer_cls = bnb.optim.AdamW8bit + else: + optimizer_cls = torch.optim.AdamW + + self.optimizer = optimizer_cls( + self.unet.parameters(), + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, + ) + + # Get the datasets: you can either provide your own training and evaluation files (see below) + # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). + + # In distributed training, the load_dataset function guarantees that only one local process can concurrently + # download the dataset. + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + dataset = load_dataset( + args.dataset_name, + args.dataset_config_name, + cache_dir=args.cache_dir, + ) + else: + data_files = {} + data_files["train"] = self.train_data_dir + dataset = load_dataset( + "json", + data_files=data_files, + cache_dir=args.cache_dir, + ) + # See more about loading custom images at + # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + column_names = dataset["train"].column_names + + # Get the column names for input/target. 
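+ # Resolve which dataset columns hold the images and captions: fall back to the
+ # DATASET_NAME_MAPPING entry (or the first two columns) when --image_column /
+ # --caption_column are not set explicitly.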
+ dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) + if args.image_column is None: + image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] + else: + image_column = args.image_column + if image_column not in column_names: + raise ValueError( + f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" + ) + if args.caption_column is None: + caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] + else: + caption_column = args.caption_column + if caption_column not in column_names: + raise ValueError( + f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" + ) + + # Preprocessing the datasets. + # We need to tokenize input captions and transform the images. + def tokenize_captions(examples, is_train=True): + captions = [] + for caption in examples[caption_column]: + if isinstance(caption, str): + captions.append(caption) + elif isinstance(caption, (list, np.ndarray)): + # take a random caption if there are multiple + captions.append(random.choice(caption) if is_train else caption[0]) + else: + raise ValueError( + f"Caption column `{caption_column}` should contain either strings or lists of strings." + ) + inputs = tokenizer( + captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, + return_tensors="pt" + ) + return inputs.input_ids + + def preprocess_train(examples): + examples["input_ids"] = tokenize_captions(examples) + examples["rm_input_ids"] = self.reward_model.blip.tokenizer(examples[caption_column], padding='max_length', + truncation=True, max_length=35, + return_tensors="pt").input_ids + examples["rm_attention_mask"] = self.reward_model.blip.tokenizer(examples[caption_column], + padding='max_length', truncation=True, + max_length=35, + return_tensors="pt").attention_mask + return examples + + with self.accelerator.main_process_first(): + if args.max_train_samples is not None: + dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) + # Set the training transforms + self.train_dataset = dataset["train"].with_transform(preprocess_train) + + def collate_fn(examples): + input_ids = torch.stack([example["input_ids"] for example in examples]) + rm_input_ids = torch.stack([example["rm_input_ids"] for example in examples]) + rm_attention_mask = torch.stack([example["rm_attention_mask"] for example in examples]) + input_ids = input_ids.view(-1, input_ids.shape[-1]) + rm_input_ids = rm_input_ids.view(-1, rm_input_ids.shape[-1]) + rm_attention_mask = rm_attention_mask.view(-1, rm_attention_mask.shape[-1]) + return {"input_ids": input_ids, "rm_input_ids": rm_input_ids, "rm_attention_mask": rm_attention_mask} + + # DataLoaders creation: + self.train_dataloader = torch.utils.data.DataLoader( + self.train_dataset, + shuffle=True, + collate_fn=collate_fn, + batch_size=args.train_batch_size, + num_workers=args.dataloader_num_workers, + ) + + # Scheduler and math around the number of training steps. 
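+ # If --max_train_steps is unset, derive it from the epoch count and the per-step
+ # (gradient-accumulation-adjusted) dataloader length; the overrode flag lets us
+ # recompute it after accelerator.prepare() changes the dataloader length.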
+ overrode_max_train_steps = False + self.num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / args.gradient_accumulation_steps) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * self.num_update_steps_per_epoch + overrode_max_train_steps = True + + self.lr_scheduler = get_scheduler( + args.lr_scheduler, + optimizer=self.optimizer, + num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + ) + + # Prepare everything with our `self.accelerator`. + self.unet, self.optimizer, self.train_dataloader, self.lr_scheduler = self.accelerator.prepare( + self.unet, self.optimizer, self.train_dataloader, self.lr_scheduler + ) + + if args.use_ema: + self.ema_unet.to(self.accelerator.device) + + # For mixed precision training we cast the text_encoder and vae weights to half-precision + # as these models are only used for inference, keeping weights in full precision is not required. + self.weight_dtype = torch.float32 + if self.accelerator.mixed_precision == "fp16": + self.weight_dtype = torch.float16 + elif self.accelerator.mixed_precision == "bf16": + self.weight_dtype = torch.bfloat16 + + # Move text_encode and vae to gpu and cast to self.weight_dtype + self.text_encoder.to(self.accelerator.device, dtype=self.weight_dtype) + self.vae.to(self.accelerator.device, dtype=self.weight_dtype) + self.reward_model.to(self.accelerator.device, dtype=self.weight_dtype) + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. + self.num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / args.gradient_accumulation_steps) + if overrode_max_train_steps: + args.max_train_steps = args.num_train_epochs * self.num_update_steps_per_epoch + # Afterwards we recalculate our number of training epochs + args.num_train_epochs = math.ceil(args.max_train_steps / self.num_update_steps_per_epoch) + + # We need to initialize the trackers we use, and also store our configuration. + # The trackers initializes automatically on the main process. + if self.accelerator.is_main_process: + tracker_config = dict(vars(args)) + tracker_config.pop("validation_prompts") + self.accelerator.init_trackers(args.tracker_project_name, tracker_config) + + def train(self, args): + + # Train! + total_batch_size = args.train_batch_size * self.accelerator.num_processes * args.gradient_accumulation_steps + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(self.train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") + logger.info(f" Total train batch size (w. 
parallel, distributed & accumulation) = {total_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + global_step = 0 + first_epoch = 0 + + # Potentially load in the weights and states from a previous save + if args.resume_from_checkpoint: + if args.resume_from_checkpoint != "latest": + path = os.path.basename(args.resume_from_checkpoint) + else: + # Get the most recent checkpoint + dirs = os.listdir(args.output_dir) + dirs = [d for d in dirs if d.startswith("checkpoint")] + dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) + path = dirs[-1] if len(dirs) > 0 else None + + if path is None: + self.accelerator.print( + f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." + ) + args.resume_from_checkpoint = None + else: + self.accelerator.print(f"Resuming from checkpoint {path}") + self.accelerator.load_state(os.path.join(args.output_dir, path)) + global_step = int(path.split("-")[1]) + + resume_global_step = global_step * args.gradient_accumulation_steps + first_epoch = global_step // self.num_update_steps_per_epoch + resume_step = resume_global_step % (self.num_update_steps_per_epoch * args.gradient_accumulation_steps) + + # Only show the progress bar once on each machine. + progress_bar = tqdm(range(global_step, args.max_train_steps), + disable=not self.accelerator.is_local_main_process) + progress_bar.set_description("Steps") + + for epoch in range(first_epoch, args.num_train_epochs): + self.unet.train() + train_loss = 0.0 + for step, batch in enumerate(self.train_dataloader): + # Skip steps until we reach the resumed step + if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: + if step % args.gradient_accumulation_steps == 0: + progress_bar.update(1) + continue + + with self.accelerator.accumulate(self.unet): + encoder_hidden_states = self.text_encoder(batch["input_ids"])[0] + latents = torch.randn((args.train_batch_size, 4, 64, 64), device=self.accelerator.device) + + self.noise_scheduler.set_timesteps(40, device=self.accelerator.device) + timesteps = self.noise_scheduler.timesteps + + mid_timestep = random.randint(30, 39) + + for i, t in enumerate(timesteps[:mid_timestep]): + with torch.no_grad(): + latent_model_input = latents + latent_model_input = self.noise_scheduler.scale_model_input(latent_model_input, t) + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=encoder_hidden_states, + ).sample + latents = self.noise_scheduler.step(noise_pred, t, latents).prev_sample + + latent_model_input = latents + latent_model_input = self.noise_scheduler.scale_model_input(latent_model_input, + timesteps[mid_timestep]) + noise_pred = self.unet( + latent_model_input, + timesteps[mid_timestep], + encoder_hidden_states=encoder_hidden_states, + ).sample + pred_original_sample = self.noise_scheduler.step(noise_pred, timesteps[mid_timestep], + latents).pred_original_sample.to(self.weight_dtype) + + pred_original_sample = 1 / self.vae.config.scaling_factor * pred_original_sample + image = self.vae.decode(pred_original_sample.to(self.weight_dtype)).sample + image = (image / 2 + 0.5).clamp(0, 1) + + # image encode + def _transform(): + return Compose([ + Resize(224, interpolation=BICUBIC), + CenterCrop(224), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + rm_preprocess = _transform() + image = rm_preprocess(image).to(self.accelerator.device) + + rewards 
= self.reward_model.score_gard(batch["rm_input_ids"], batch["rm_attention_mask"], image) + loss = F.relu(-rewards + 2) + loss = loss.mean() * args.grad_scale + + # Gather the losses across all processes for logging (if we use distributed training). + avg_loss = self.accelerator.gather(loss.repeat(args.train_batch_size)).mean() + train_loss += avg_loss.item() / args.gradient_accumulation_steps + + # Backpropagate + self.accelerator.backward(loss) + if self.accelerator.sync_gradients: + self.accelerator.clip_grad_norm_(self.unet.parameters(), args.max_grad_norm) + self.optimizer.step() + self.lr_scheduler.step() + self.optimizer.zero_grad() + + # Checks if the self.accelerator has performed an optimization step behind the scenes + if self.accelerator.sync_gradients: + if args.use_ema: + self.ema_unet.step(self.unet.parameters()) + progress_bar.update(1) + global_step += 1 + self.accelerator.log({"train_loss": train_loss}, step=global_step) + train_loss = 0.0 + + if global_step % args.checkpointing_steps == 0: + if self.accelerator.is_main_process: + save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") + self.accelerator.save_state(save_path) + logger.info(f"Saved state to {save_path}") + + logs = {"step_loss": loss.detach().item(), "lr": self.lr_scheduler.get_last_lr()[0]} + progress_bar.set_postfix(**logs) + + if global_step >= args.max_train_steps: + break + + if self.accelerator.is_main_process: + if args.validation_prompts is not None and epoch % args.validation_epochs == 0: + if args.use_ema: + # Store the UNet parameters temporarily and load the EMA parameters to perform inference. + self.ema_unet.store(self.unet.parameters()) + self.ema_unet.copy_to(self.unet.parameters()) + if args.use_ema: + # Switch back to the original UNet parameters. + self.ema_unet.restore(self.unet.parameters()) + + # Create the pipeline using the trained modules and save it. + self.accelerator.wait_for_everyone() + if self.accelerator.is_main_process: + self.unet = self.accelerator.unwrap_model(self.unet) + if args.use_ema: + self.ema_unet.copy_to(self.unet.parameters()) + + pipeline = StableDiffusionPipeline.from_pretrained( + self.pretrained_model_name_or_path, + text_encoder=self.text_encoder, + vae=self.vae, + unet=self.unet, + revision=args.revision, + ) + pipeline.save_pretrained(args.output_dir) + + if args.push_to_hub: + upload_folder( + repo_id=self.repo_id, + folder_path=args.output_dir, + commit_message="End of training", + ignore_patterns=["step_*", "epoch_*"], + ) + + self.accelerator.end_training() diff --git a/ImageReward/__init__.py b/ImageReward/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ec7d51d8e7417474542f05883398390c37a6ba6 --- /dev/null +++ b/ImageReward/__init__.py @@ -0,0 +1,3 @@ +from .utils import * +from .models import * +from .ReFL import * \ No newline at end of file diff --git a/ImageReward/models/AestheticScore.py b/ImageReward/models/AestheticScore.py new file mode 100644 index 0000000000000000000000000000000000000000..aeefd0f515e803085b16dda2497b34babe5c684e --- /dev/null +++ b/ImageReward/models/AestheticScore.py @@ -0,0 +1,95 @@ +''' +@File : AestheticScore.py +@Time : 2023/02/12 14:54:00 +@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +@Description: AestheticScore. 
+* Based on improved-aesthetic-predictor code base +* https://github.com/christophschuhmann/improved-aesthetic-predictor +''' + +import torch +import torch.nn as nn +import torch.nn.functional as F +from PIL import Image +import clip + + +# if you changed the MLP architecture during training, change it also here: +class MLP(nn.Module): + def __init__(self, input_size): + super().__init__() + self.input_size = input_size + self.layers = nn.Sequential( + nn.Linear(self.input_size, 1024), + # nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(1024, 128), + # nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(128, 64), + # nn.ReLU(), + nn.Dropout(0.1), + + nn.Linear(64, 16), + # nn.ReLU(), + + nn.Linear(16, 1) + ) + + def forward(self, x): + return self.layers(x) + + +class AestheticScore(nn.Module): + def __init__(self, download_root, device='cpu'): + super().__init__() + self.device = device + self.clip_model, self.preprocess = clip.load("ViT-L/14", device=self.device, jit=False, + download_root=download_root) + self.mlp = MLP(768) + + if device == "cpu": + self.clip_model.float() + else: + clip.model.convert_weights( + self.clip_model) # Actually this line is unnecessary since clip by default already on float16 + + # have clip.logit_scale require no grad. + self.clip_model.logit_scale.requires_grad_(False) + + def score(self, prompt, image_path): + + if (type(image_path).__name__ == 'list'): + _, rewards = self.inference_rank(prompt, image_path) + return rewards + + # image encode + pil_image = Image.open(image_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_features = F.normalize(self.clip_model.encode_image(image)).float() + + # score + rewards = self.mlp(image_features) + + return rewards.detach().cpu().numpy().item() + + def inference_rank(self, prompt, generations_list): + + img_set = [] + for generations in generations_list: + # image encode + img_path = generations + pil_image = Image.open(img_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_features = F.normalize(self.clip_model.encode_image(image)) + img_set.append(image_features) + + img_features = torch.cat(img_set, 0).float() # [image_num, feature_dim] + rewards = self.mlp(img_features) + rewards = torch.squeeze(rewards) + _, rank = torch.sort(rewards, dim=0, descending=True) + _, indices = torch.sort(rank, dim=0) + indices = indices + 1 + + return indices.detach().cpu().numpy().tolist(), rewards.detach().cpu().numpy().tolist() diff --git a/ImageReward/models/BLIP/__init__.py b/ImageReward/models/BLIP/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0a617e7dda333d40ed10207f44ccc3857fb18ad4 --- /dev/null +++ b/ImageReward/models/BLIP/__init__.py @@ -0,0 +1 @@ +from .blip_pretrain import * \ No newline at end of file diff --git a/ImageReward/models/BLIP/blip.py b/ImageReward/models/BLIP/blip.py new file mode 100644 index 0000000000000000000000000000000000000000..0dfdb72ab619587b62357904349358b221f631e4 --- /dev/null +++ b/ImageReward/models/BLIP/blip.py @@ -0,0 +1,70 @@ +''' + * Adapted from BLIP (https://github.com/salesforce/BLIP) +''' + +import warnings +warnings.filterwarnings("ignore") + +import torch +import os +from urllib.parse import urlparse +from timm.models.hub import download_cached_file +from transformers import BertTokenizer +from .vit import VisionTransformer, interpolate_pos_embed + + +def init_tokenizer(): + tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + tokenizer.add_special_tokens({'bos_token':'[DEC]'}) + 
tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']}) + tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0] + return tokenizer + + +def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0): + + assert vit in ['base', 'large'], "vit parameter must be base or large" + if vit=='base': + vision_width = 768 + visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, + num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, + drop_path_rate=0 or drop_path_rate + ) + elif vit=='large': + vision_width = 1024 + visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, + num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, + drop_path_rate=0.1 or drop_path_rate + ) + return visual_encoder, vision_width + + +def is_url(url_or_filename): + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https") + +def load_checkpoint(model,url_or_filename): + if is_url(url_or_filename): + cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) + checkpoint = torch.load(cached_file, map_location='cpu') + elif os.path.isfile(url_or_filename): + checkpoint = torch.load(url_or_filename, map_location='cpu') + else: + raise RuntimeError('checkpoint url or path is invalid') + + state_dict = checkpoint['model'] + + state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder) + if 'visual_encoder_m.pos_embed' in model.state_dict().keys(): + state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], + model.visual_encoder_m) + for key in model.state_dict().keys(): + if key in state_dict.keys(): + if state_dict[key].shape!=model.state_dict()[key].shape: + print(key, ": ", state_dict[key].shape, ', ', model.state_dict()[key].shape) + del state_dict[key] + + msg = model.load_state_dict(state_dict,strict=False) + print('load checkpoint from %s'%url_or_filename) + return model,msg + diff --git a/ImageReward/models/BLIP/blip_pretrain.py b/ImageReward/models/BLIP/blip_pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..793cb07944810eebe1d28f26aa19482b0abcf0a5 --- /dev/null +++ b/ImageReward/models/BLIP/blip_pretrain.py @@ -0,0 +1,43 @@ +''' + * Adapted from BLIP (https://github.com/salesforce/BLIP) +''' + +import transformers +transformers.logging.set_verbosity_error() + +from torch import nn +import os +from .med import BertConfig, BertModel +from .blip import create_vit, init_tokenizer + +class BLIP_Pretrain(nn.Module): + def __init__(self, + med_config = "med_config.json", + image_size = 224, + vit = 'base', + vit_grad_ckpt = False, + vit_ckpt_layer = 0, + embed_dim = 256, + queue_size = 57600, + momentum = 0.995, + ): + """ + Args: + med_config (str): path for the mixture of encoder-decoder model's configuration file + image_size (int): input image size + vit (str): model size of vision transformer + """ + super().__init__() + + self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer, 0) + + self.tokenizer = init_tokenizer() + encoder_config = BertConfig.from_json_file(med_config) + encoder_config.encoder_width = vision_width + self.text_encoder = BertModel(config=encoder_config, add_pooling_layer=False) + + text_width = self.text_encoder.config.hidden_size + + self.vision_proj = 
nn.Linear(vision_width, embed_dim) + self.text_proj = nn.Linear(text_width, embed_dim) + diff --git a/ImageReward/models/BLIP/med.py b/ImageReward/models/BLIP/med.py new file mode 100644 index 0000000000000000000000000000000000000000..426f4689833d988526c6e26cd627f30975ab7606 --- /dev/null +++ b/ImageReward/models/BLIP/med.py @@ -0,0 +1,947 @@ +''' + * Adapted from BLIP (https://github.com/salesforce/BLIP) + * Based on huggingface code base + * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert +''' + +import math +from typing import Tuple + +import torch +from torch import Tensor, device, nn +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from transformers.activations import ACT2FN +from transformers.file_utils import ( + ModelOutput, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import ( + PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import logging +from transformers.models.bert.configuration_bert import BertConfig + + +logger = logging.get_logger(__name__) + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward( + self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 + ): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + embeddings = inputs_embeds + + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, 
config.num_attention_heads) + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
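+ # attention_scores has shape [batch, num_heads, query_len, key_len]; the
+ # 1/sqrt(head_size) scaling is applied after the optional relative-position terms.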
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = 
self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if self.config.add_cross_attention: + self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + mode=None, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + + if mode=='multimodal': + assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" + + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + mode='multimodal', + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warn( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + mode=mode, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + mode=mode, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = BertConfig + base_model_prefix = "bert" + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """ Initialize the weights """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +class BertModel(BertPreTrainedModel): + """ + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in `Attention is + all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an + input to the forward pass. + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + + def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, + device, is_decoder) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() + else: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + mode=mode, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + 
return_logits=False, + is_decoder=True, + reduction='mean', + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are + ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + if reduction=='none': + lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "past_key_values": past, + "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), + "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), + "is_decoder": True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past diff --git a/ImageReward/models/BLIP/vit.py b/ImageReward/models/BLIP/vit.py new file mode 100644 index 0000000000000000000000000000000000000000..7e5cf430090956461bc64d5ccbe427a71f50f5f2 --- /dev/null +++ b/ImageReward/models/BLIP/vit.py @@ -0,0 +1,301 @@ +''' + * Adapted from BLIP (https://github.com/salesforce/BLIP) + * Based on timm code base + * https://github.com/rwightman/pytorch-image-models/tree/master/timm +''' + +import torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial 
+ +from timm.models.vision_transformer import _cfg, PatchEmbed +from timm.models.registry import register_model +from timm.models.layers import trunc_normal_, DropPath +from timm.models.helpers import named_apply, adapt_input_conv + +from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.attn_gradients = None + self.attention_map = None + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def forward(self, x, register_hook=False): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + if register_hook: + self.save_attention_map(attn) + attn.register_hook(self.save_attn_gradients) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if use_grad_checkpointing: + self.attn = checkpoint_wrapper(self.attn) + self.mlp = checkpoint_wrapper(self.mlp) + + def forward(self, x, register_hook=False): + x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - + https://arxiv.org/abs/2010.11929 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, + use_grad_checkpointing=False, ckpt_layer=0): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer + """ + super().__init__() + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer) + ) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def forward(self, x, register_blk=-1): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + + x = x + self.pos_embed[:,:x.size(1),:] + x 
= self.pos_drop(x) + + for i,blk in enumerate(self.blocks): + x = blk(x, register_blk==i) + x = self.norm(x) + + return x + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) +# if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: +# model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) +# model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) +# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: +# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) +# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + 
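For orientation on the qkv remapping a couple of lines below (the sizes here are illustrative assumptions, not read from a checkpoint): the query/key/value kernels in these .npz files are laid out as (embed_dim, num_heads, head_dim), so flattening the head dimensions, transposing, and concatenating the three yields the (3 * embed_dim, embed_dim) weight expected by the fused nn.Linear(dim, dim * 3) projection.

```python
import torch

embed_dim, num_heads = 768, 12
head_dim = embed_dim // num_heads

# One (embed_dim, num_heads, head_dim) kernel per projection, mirroring the Flax layout
flax_kernels = [torch.randn(embed_dim, num_heads, head_dim) for _ in ("query", "key", "value")]

qkv_weight = torch.cat([k.flatten(1).T for k in flax_kernels])
print(qkv_weight.shape)  # torch.Size([2304, 768]), matching nn.Linear(768, 768 * 3).weight
```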
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + + +def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): + # interpolate position embedding + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = visual_encoder.patch_embed.num_patches + num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + + if orig_size!=new_size: + # class_token and dist_token are kept unchanged + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2)) + + return new_pos_embed + else: + return pos_embed_checkpoint \ No newline at end of file diff --git a/ImageReward/models/BLIPScore.py b/ImageReward/models/BLIPScore.py new file mode 100644 index 0000000000000000000000000000000000000000..a44ed3b3d1008d659559ab1643ad251dc4b80287 --- /dev/null +++ b/ImageReward/models/BLIPScore.py @@ -0,0 +1,97 @@ +''' +@File : BLIPScore.py +@Time : 2023/02/19 20:48:00 +@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +@Description: BLIPScore. 
+* Based on BLIP code base +* https://github.com/salesforce/BLIP +''' + +import torch +import torch.nn as nn +import torch.nn.functional as F +from PIL import Image +from ImageReward.models.BLIP.blip_pretrain import BLIP_Pretrain +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize + +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +def _convert_image_to_rgb(image): + return image.convert("RGB") + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + _convert_image_to_rgb, + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +class BLIPScore(nn.Module): + def __init__(self, med_config, device='cpu'): + super().__init__() + self.device = device + + self.preprocess = _transform(224) + self.blip = BLIP_Pretrain(image_size=224, vit='large', med_config=med_config) + + + def score(self, prompt, image_path): + + if (type(image_path).__name__=='list'): + _, rewards = self.inference_rank(prompt, image_path) + return rewards + + # text encode + text_input = self.blip.tokenizer(prompt, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(self.device) + text_output = self.blip.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text') + txt_feature = F.normalize(self.blip.text_proj(text_output.last_hidden_state[:,0,:])) + + # image encode + pil_image = Image.open(image_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_embeds = self.blip.visual_encoder(image) + image_features = F.normalize(self.blip.vision_proj(image_embeds[:,0,:]), dim=-1) + + # score + rewards = torch.sum(torch.mul(txt_feature, image_features), dim=1, keepdim=True) + + return rewards.detach().cpu().numpy().item() + + + def inference_rank(self, prompt, generations_list): + + text_input = self.blip.tokenizer(prompt, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(self.device) + text_output = self.blip.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text') + txt_feature = F.normalize(self.blip.text_proj(text_output.last_hidden_state[:,0,:])) + + txt_set = [] + img_set = [] + for generations in generations_list: + # image encode + img_path = generations + pil_image = Image.open(img_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_embeds = self.blip.visual_encoder(image) + image_features = F.normalize(self.blip.vision_proj(image_embeds[:,0,:]), dim=-1) + img_set.append(image_features) + txt_set.append(txt_feature) + + txt_features = torch.cat(txt_set, 0).float() # [image_num, feature_dim] + img_features = torch.cat(img_set, 0).float() # [image_num, feature_dim] + rewards = torch.sum(torch.mul(txt_features, img_features), dim=1, keepdim=True) + rewards = torch.squeeze(rewards) + _, rank = torch.sort(rewards, dim=0, descending=True) + _, indices = torch.sort(rank, dim=0) + indices = indices + 1 + + return indices.detach().cpu().numpy().tolist(), rewards.detach().cpu().numpy().tolist() \ No newline at end of file diff --git a/ImageReward/models/CLIPScore.py b/ImageReward/models/CLIPScore.py new file mode 100644 index 0000000000000000000000000000000000000000..8aba714ed0da54704a22e9a34c4c639be9c0aec3 --- /dev/null +++ b/ImageReward/models/CLIPScore.py @@ -0,0 +1,78 @@ +''' +@File : CLIPScore.py +@Time : 2023/02/12 13:14:00 
+@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +@Description: CLIPScore. +* Based on CLIP code base +* https://github.com/openai/CLIP +''' + +import torch +import torch.nn as nn +import torch.nn.functional as F +from PIL import Image +import clip + +class CLIPScore(nn.Module): + def __init__(self, download_root, device='cpu'): + super().__init__() + self.device = device + self.clip_model, self.preprocess = clip.load("ViT-L/14", device=self.device, jit=False, + download_root=download_root) + + if device == "cpu": + self.clip_model.float() + else: + clip.model.convert_weights(self.clip_model) # Actually this line is unnecessary since clip by default already on float16 + + # have clip.logit_scale require no grad. + self.clip_model.logit_scale.requires_grad_(False) + + + def score(self, prompt, image_path): + + if (type(image_path).__name__=='list'): + _, rewards = self.inference_rank(prompt, image_path) + return rewards + + # text encode + text = clip.tokenize(prompt, truncate=True).to(self.device) + txt_features = F.normalize(self.clip_model.encode_text(text)) + + # image encode + pil_image = Image.open(image_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_features = F.normalize(self.clip_model.encode_image(image)) + + # score + rewards = torch.sum(torch.mul(txt_features, image_features), dim=1, keepdim=True) + + return rewards.detach().cpu().numpy().item() + + + def inference_rank(self, prompt, generations_list): + + text = clip.tokenize(prompt, truncate=True).to(self.device) + txt_feature = F.normalize(self.clip_model.encode_text(text)) + + txt_set = [] + img_set = [] + for generations in generations_list: + # image encode + img_path = generations + pil_image = Image.open(img_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_features = F.normalize(self.clip_model.encode_image(image)) + img_set.append(image_features) + txt_set.append(txt_feature) + + txt_features = torch.cat(txt_set, 0).float() # [image_num, feature_dim] + img_features = torch.cat(img_set, 0).float() # [image_num, feature_dim] + rewards = torch.sum(torch.mul(txt_features, img_features), dim=1, keepdim=True) + rewards = torch.squeeze(rewards) + _, rank = torch.sort(rewards, dim=0, descending=True) + _, indices = torch.sort(rank, dim=0) + indices = indices + 1 + + return indices.detach().cpu().numpy().tolist(), rewards.detach().cpu().numpy().tolist() \ No newline at end of file diff --git a/ImageReward/models/__init__.py b/ImageReward/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9ba230b0a38758ee78a4eba7caeedc259a1a4dbb --- /dev/null +++ b/ImageReward/models/__init__.py @@ -0,0 +1,4 @@ +from .AestheticScore import * +from .BLIPScore import * +from .CLIPScore import * +from .BLIP import * \ No newline at end of file diff --git a/ImageReward/utils.py b/ImageReward/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f897717701a682fbcce751f8a793a74fcf39f107 --- /dev/null +++ b/ImageReward/utils.py @@ -0,0 +1,184 @@ +''' +@File : utils.py +@Time : 2023/04/05 19:18:00 +@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +* Based on CLIP code base +* https://github.com/openai/CLIP +* Checkpoint of CLIP/BLIP/Aesthetic are from: +* https://github.com/openai/CLIP +* https://github.com/salesforce/BLIP +* https://github.com/christophschuhmann/improved-aesthetic-predictor +''' + +import os +import urllib +from typing import Union, List +import pathlib + +import torch +from tqdm import 
tqdm +from huggingface_hub import hf_hub_download + +from .ImageReward import ImageReward +from .models.CLIPScore import CLIPScore +from .models.BLIPScore import BLIPScore +from .models.AestheticScore import AestheticScore + +_MODELS = { + "ImageReward-v1.0": "https://huggingface.co/THUDM/ImageReward/blob/main/ImageReward.pt", +} + + +def available_models() -> List[str]: + """Returns the names of available ImageReward models""" + return list(_MODELS.keys()) + + +def ImageReward_download(url: str, root: str): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + download_target = os.path.join(root, filename) + hf_hub_download(repo_id="THUDM/ImageReward", filename=filename, local_dir=root) + return download_target + + +def load(name: str = "ImageReward-v1.0", + device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", + download_root: str = None, + med_config_path: str = None): + """Load a ImageReward model + + Parameters + ---------- + name: str + A model name listed by `ImageReward.available_models()`, or the path to a model checkpoint containing the state_dict + device: Union[str, torch.device] + The device to put the loaded model + download_root: str + path to download the model files; by default, it uses "~/.cache/ImageReward" + med_config_path: str + + Returns + ------- + model : torch.nn.Module + The ImageReward model + """ + if name in _MODELS: + download_root = download_root or "~/.cache/ImageReward" + download_root = pathlib.Path(download_root) + model_path = pathlib.Path(download_root) / 'ImageReward.pt' + + if not model_path.exists(): + model_path = ImageReward_download(_MODELS[name], root=download_root.as_posix()) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {available_models()}") + + print('-> load ImageReward model from %s' % model_path) + state_dict = torch.load(model_path, map_location='cpu') + + # med_config + if med_config_path is None: + med_config_root = download_root or "~/.cache/ImageReward" + med_config_root = pathlib.Path(med_config_root) + med_config_path = med_config_root / 'med_config.json' + + if not med_config_path.exists(): + med_config_path = ImageReward_download("https://huggingface.co/THUDM/ImageReward/blob/main/med_config.json", + root=med_config_root.as_posix()) + print('-> load ImageReward med_config from %s' % med_config_path) + + model = ImageReward(device=device, med_config=med_config_path).to(device) + msg = model.load_state_dict(state_dict, strict=False) + model.eval() + + return model + + +_SCORES = { + "CLIP": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", + "BLIP": "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large.pth", + "Aesthetic": "https://github.com/christophschuhmann/improved-aesthetic-predictor/raw/main/sac%2Blogos%2Bava1-l14-linearMSE.pth", +} + + +def available_scores() -> List[str]: + """Returns the names of available ImageReward scores""" + return list(_SCORES.keys()) + + +def _download(url: str, root: str): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + return download_target + + with urllib.request.urlopen(url) as source, 
open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, + unit_divisor=1024) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + return download_target + + +def load_score(name: str = "CLIP", device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", + download_root: str = None): + """Load an ImageReward score model + + Parameters + ---------- + name : str + A score name listed by `ImageReward.available_scores()` + + device : Union[str, torch.device] + The device to put the loaded model + + download_root: str + path to download the model files; by default, it uses "~/.cache/ImageReward" + + Returns + ------- + model : torch.nn.Module + The requested score model + """ + model_download_root = download_root or os.path.expanduser("~/.cache/ImageReward") + + if name in _SCORES: + model_path = _download(_SCORES[name], model_download_root) + else: + raise RuntimeError(f"Score {name} not found; available scores = {available_scores()}") + + print('load checkpoint from %s' % model_path) + if name == "BLIP": + state_dict = torch.load(model_path, map_location='cpu') + med_config = ImageReward_download("https://huggingface.co/THUDM/ImageReward/blob/main/med_config.json", + model_download_root) + model = BLIPScore(med_config=med_config, device=device).to(device) + model.blip.load_state_dict(state_dict['model'], strict=False) + elif name == "CLIP": + model = CLIPScore(download_root=model_download_root, device=device).to(device) + elif name == "Aesthetic": + state_dict = torch.load(model_path, map_location='cpu') + model = AestheticScore(download_root=model_download_root, device=device).to(device) + model.mlp.load_state_dict(state_dict, strict=False) + else: + raise RuntimeError(f"Score {name} not found; available scores = {available_scores()}") + + print("checkpoint loaded") + model.eval() + + return model diff --git a/Install.md b/Install.md new file mode 100644 index 0000000000000000000000000000000000000000..375bb484201f39c193ec7f050f1cfb7386bc30b0 --- /dev/null +++ b/Install.md @@ -0,0 +1,66 @@ +## Installation + +Create a new conda environment: + +```shell +conda create --name svgrender python=3.10 +conda activate svgrender +``` + +Install PyTorch and the following libraries: + +```shell +conda install pytorch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1 pytorch-cuda=11.6 -c pytorch -c nvidia +pip install hydra-core omegaconf +pip install freetype-py shapely svgutils +pip install opencv-python scikit-image matplotlib visdom wandb BeautifulSoup4 +pip install triton numba +pip install numpy scipy scikit-fmm einops timm fairscale==0.4.13 +pip install accelerate transformers safetensors datasets +``` + +Install LaMa: + +```shell +pip install easydict scikit-learn pytorch_lightning webdataset +pip install albumentations==0.5.2 +pip install kornia==0.5.0 +pip install wldhx.yadisk-direct + +cd lama +# download LaMa model weights +# raw link (deprecated): curl -L $(yadisk-direct https://disk.yandex.ru/d/kHJkc7bs7mKIVA) -o big-lama.zip +curl -O -L https://huggingface.co/xingxm/PyTorch-SVGRender-models/resolve/main/big-lama.zip +unzip big-lama.zip +``` + +Install CLIP: + +```shell +pip install ftfy regex tqdm +pip install git+https://github.com/openai/CLIP.git +``` + +Install diffusers: + +```shell +pip install diffusers==0.20.2 +``` + +Install xformers (requires `python=3.10`): + +```shell +conda install xformers -c xformers +``` + 
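Before moving on to build diffvg, it can be worth confirming that the environment set up above actually sees PyTorch and the GPU. This quick check is an optional addition, not part of the original instructions:

```shell
python -c "import torch; print(torch.__version__, torch.cuda.is_available())"
```

If the second value is `False`, it is usually worth revisiting the `pytorch-cuda=11.6` step before compiling diffvg.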
+Install diffvg: + +```shell +git clone https://github.com/BachiLi/diffvg.git +cd diffvg +git submodule update --init --recursive +conda install -y -c anaconda cmake +conda install -y -c conda-forge ffmpeg +pip install svgwrite svgpathtools cssutils torch-tools +python setup.py install +``` \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..a612ad9813b006ce81d1ee438dd784da99a54007 --- /dev/null +++ b/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. 
For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/README copy.md b/README copy.md new file mode 100644 index 0000000000000000000000000000000000000000..f2e87ddcb479c846116a3cf41ec5bcebc5f05515 --- /dev/null +++ b/README copy.md @@ -0,0 +1,304 @@ +--- +title: SVGRender +emoji: 💻 +colorFrom: gray +colorTo: yellow +sdk: gradio +sdk_version: 4.20.1 +python_version: 3.10.12 +app_file: app.py +pinned: false +license: apache-2.0 +--- + +

Pytorch-SVGRender

+ +

+ pyhton + license + website + docs +

+ +
+Pytorch-SVGRender +

Pytorch-SVGRender: The go-to library for differentiable rendering methods for SVG generation.

+
+

+ Updates • + Table of Contents • + Installation • + Quickstart • + FAQ • + TODO • + Acknowledgment • + Citation • + Licence +

+ +

Recent Updates

+ +- [12/2023] 🔥 We open-sourced Pytorch-SVGRender V1.0. + +

Table of Contents

+

▴ Back to top

+ +### 1. Image Vectorization + +- DiffVG: Differentiable Vector Graphics Rasterization for Editing and Learning (`SIGGRAPH 2020`) + + [[Project]](https://people.csail.mit.edu/tzumao/diffvg/) [[Paper]](https://cseweb.ucsd.edu/~tzli/diffvg/diffvg.pdf) [[Code]](https://github.com/BachiLi/diffvg) + + DiffVG is a differentiable rasterizer for 2D vector graphics. **This repository is heavily based on DiffVG.** + +- LIVE: Towards Layer-wise Image Vectorization (`CVPR 2022`) + + [[Project]](https://ma-xu.github.io/LIVE/) [[Paper]](https://ma-xu.github.io/LIVE/index_files/CVPR22_LIVE_main.pdf) [[Code]](https://github.com/Picsart-AI-Research/LIVE-Layerwise-Image-Vectorization) + +- CLIPasso: Semantically-Aware Object Sketching (`SIGGRAPH 2022`) + + [[Project]](https://clipasso.github.io/clipasso/) [[Paper]](https://arxiv.org/abs/2202.05822) [[Code]](https://github.com/yael-vinker/CLIPasso) + +- CLIPascene: Scene Sketching with Different Types and Levels of Abstraction (`ICCV 2023`) + + [[Project]](https://clipascene.github.io/CLIPascene/) [[Paper]](https://arxiv.org/abs/2211.17256) [[Code]](https://github.com/yael-vinker/SceneSketch) + +### 2. Text-to-SVG Synthesis + +- CLIPDraw: Exploring Text-to-Drawing Synthesis through Language-Image Encoders (`NIPS 2022`) + + [[Paper]](https://arxiv.org/abs/2106.14843) [[Code]](https://github.com/kvfrans/clipdraw) + +- StyleCLIPDraw: Coupling Content and Style in Text-to-Drawing Synthesis + + [[Live]](https://slideslive.com/38970834/styleclipdraw-coupling-content-and-style-in-texttodrawing-synthesis?ref=account-folder-92044-folders) [[Paper]](https://arxiv.org/abs/2202.12362) [[Code]](https://github.com/pschaldenbrand/StyleCLIPDraw) + +- CLIPFont: Texture Guided Vector WordArt Generation (`BMVC 2022`) + + [[Paper]](https://bmvc2022.mpi-inf.mpg.de/0543.pdf) [[Code]](https://github.com/songyiren98/CLIPFont) + +- VectorFusion: Text-to-SVG by Abstracting Pixel-Based Diffusion Models (`CVPR 2023`) + + [[Project]](https://vectorfusion.github.io/) [[Paper]](https://openaccess.thecvf.com/content/CVPR2023/papers/Jain_VectorFusion_Text-to-SVG_by_Abstracting_Pixel-Based_Diffusion_Models_CVPR_2023_paper.pdf) + +- DiffSketcher: Text Guided Vector Sketch Synthesis through Latent Diffusion Models (`NIPS 2023`) + + [[Project]](https://ximinng.github.io/DiffSketcher-project/) [[Live]](https://neurips.cc/virtual/2023/poster/72425) [[Paper]](https://arxiv.org/abs/2306.14685) [[Code]](https://github.com/ximinng/DiffSketcher) + +- Word-As-Image for Semantic Typography (`SIGGRAPH 2023`) + + [[Project]](https://wordasimage.github.io/Word-As-Image-Page/) [[Paper]](https://arxiv.org/abs/2303.01818) [[Code]](https://github.com/Shiriluz/Word-As-Image) + +- SVGDreamer: Text Guided SVG Generation with Diffusion Model (`CVPR 2024`) + + [[Project]](https://ximinng.github.io/SVGDreamer-project/) [[Paper]](https://arxiv.org/abs/2312.16476) [[code]](https://github.com/ximinng/SVGDreamer) + +

Installation

+
+You can follow the steps below to quickly get up and running with PyTorch-SVGRender.
+These steps will let you run quick inference locally.
+
+From the top-level directory, run:
+
+```bash
+sh script/install.sh
+```
+
+Note: make sure the script has execute **permissions** (you can grant them with `chmod +x script/install.sh`) before
+running it.
+
+For more information, please refer to
+[Install.md](https://github.com/ximinng/PyTorch-SVGRender/blob/main/Install.md).
+
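+If the installation succeeded, diffvg's Python bindings should be importable. As a minimal, optional sanity
+check (assuming diffvg exposes its bindings as `pydiffvg`, as in the upstream repository):
+
+```python
+# optional post-install check; assumes the `pydiffvg` module name used by upstream diffvg
+import torch
+import pydiffvg
+
+print("pydiffvg loaded from:", pydiffvg.__file__)
+print("CUDA available for torch:", torch.cuda.is_available())
+```
+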

Quickstart

+

▴ Back to top

+ +**For more information, [read the docs](https://pytorch-svgrender.readthedocs.io/en/latest/index.html).** + +### 1. Basic Usage + +**DiffVG** vectorizes any raster images: + +```shell +python svg_render.py x=diffvg target='./data/fallingwater.png' +# change 'num_paths' and 'num_iter' for better results +python svg_render.py x=diffvg target='./data/fallingwater.png' x.num_paths=512 x.num_iter=2000 +``` + +**LIVE** vectorizes the raster emojis images (in original PNG format): + +```shell +python svg_render.py x=live target='./data/simile.png' +# change 'num_paths' and 'schedule_each' for better results +python svg_render.py x=live target='./data/simile.png' x.num_paths=5 x.schedule_each=1 +``` + +**CLIPasso** synthesizes vectorized sketches from images: + +**note:** first download the U2Net model `sh script/download_u2net.sh`. + +```shell +python svg_render.py x=clipasso target='./data/horse.png' +``` + +**CLIPascene** synthesizes vectorized sketches from images: + +**note:** first download the U2Net model `sh script/download_u2net.sh`, and make sure the `./data/background` folder and +the `./data/scene` folder exist with target images. + +```shell +python svg_render.py x=clipascene target='ballerina.png' +``` + +**CLIPDraw** synthesizes SVGs based on text prompts: + +```shell +python svg_render.py x=clipdraw "prompt='a photo of a cat'" +``` + +**StyleCLIPDraw** synthesizes SVG based on a text prompt and a reference image: + +```shell +python svg_render.py x=styleclipdraw "prompt='a photo of a cat'" target='./data/starry.png' +``` + +**CLIPFont** styles vector fonts according to text prompts: + +```shell +python svg_render.py x=clipfont "prompt='Starry Night by Vincent van gogh'" target='./data/alphabet1.svg' +``` + +--- + +> Because the following methods rely on stable diffusion, add `diffuser.download=True` to the command the **first time** you +run the script. + +**SVGDreamer** generates various styles of SVG based on text prompts. It supports the use of six vector primitives, +including Iconography, Sketch, Pixel Art, Low-Poly, Painting, and Ink and Wash. + +```shell +# primitive: iconography +## 1. German shepherd +python svg_render.py x=svgdreamer "prompt='A colorful German shepherd in vector art. tending on artstation.'" save_step=30 x.guidance.n_particle=6 x.guidance.vsd_n_particle=4 x.guidance.phi_n_particle=2 result_path='./svgdreamer/GermanShepherd' +## 2. sydney opera house +python svg_render.py x=svgdreamer "prompt='Sydney opera house. oil painting. by Van Gogh'" save_step=30 x.guidance.n_particle=6 x.guidance.vsd_n_particle=4 x.guidance.phi_n_particle=2 x.num_paths=512 result_path='./svgdreamer/Sydney' +# primitive: low-ploy +python svg_render.py x=svgdreamer "prompt='A picture of a bald eagle. low-ploy. polygon'" x.style='low-poly' save_step=30 x.guidance.n_particle=6 x.guidance.vsd_n_particle=4 x.guidance.phi_n_particle=2 x.guidance.num_iter=1000 result_path='./svgdreamer/eagle' +# primitive: pixel-art +python svg_render.py x=svgdreamer "prompt='Darth vader with lightsaber. ultrarealistic. pixelart. trending on artstation.'" x.style='pixelart' save_step=30 x.guidance.n_particle=6 x.guidance.vsd_n_particle=4 x.guidance.phi_n_particle=2 x.guidance.num_iter=1000 result_path='./svgdreamer/DarthVader' +# primitive: painting +python svg_render.py x=svgdreamer "prompt='self portrait of Van Gogh. oil painting. cmyk portrait. multi colored. defiant and beautiful. cmyk. 
expressive eyes.'" x.style='painting' save_step=50 x.guidance.n_particle=6 x.guidance.vsd_n_particle=4 x.guidance.phi_n_particle=2 x.guidance.t_schedule='randint' x.num_paths=1500 result_path='./svgdreamer/VanGogh_portrait'
+# primitive: sketch
+python svg_render.py x=svgdreamer "prompt='A free-hand drawing of A speeding Lamborghini. black and white drawing.'" x.style='sketch' save_step=30 x.guidance.n_particle=6 x.guidance.vsd_n_particle=4 x.guidance.phi_n_particle=2 x.guidance.t_schedule='randint' x.num_paths=128 result_path='./svgdreamer/Lamborghini'
+# primitive: ink and wash
+python svg_render.py x=svgdreamer "prompt='Big Wild Goose Pagoda. ink style. Minimalist abstract art grayscale watercolor.'" x.style='ink' save_step=30 x.guidance.n_particle=6 x.guidance.vsd_n_particle=4 x.guidance.phi_n_particle=2 x.guidance.t_schedule='randint' x.num_paths=128 x.width=6 result_path='./svgdreamer/BigWildGoosePagoda'
+```
+
+**VectorFusion** synthesizes SVGs in various styles based on text prompts:
+
+```shell
+# Iconography style
+python svg_render.py x=vectorfusion x.style='iconography' "prompt='a panda rowing a boat in a pond. minimal flat 2d vector icon. lineal color. trending on artstation.'"
+# PixelArt style
+python svg_render.py x=vectorfusion x.style='pixelart' "prompt='a panda rowing a boat in a pond. pixel art. trending on artstation.'"
+# Sketch style
+python svg_render.py x=vectorfusion x.style='sketch' "prompt='a panda rowing a boat in a pond. minimal 2d line drawing. trending on artstation.'"
+```
+
+Following SVGDreamer, we've added three additional styles (`Painting`, `Ink and Wash`, and `Low-Poly`) to VectorFusion.
+
+**DiffSketcher** synthesizes vector sketches based on text prompts:
+
+```shell
+# DiffSketcher
+python svg_render.py x=diffsketcher "prompt='a photo of Sydney opera house'" x.token_ind=5 seed=8019
+# DiffSketcher, variable stroke width
+python svg_render.py x=diffsketcher "prompt='a photo of Sydney opera house'" x.token_ind=5 x.optim_width=True seed=8019
+# DiffSketcher RGBA version
+python svg_render.py x=diffsketcher "prompt='a photo of Sydney opera house'" x.token_ind=5 x.optim_width=True x.optim_rgba=True x.optim_opacity=False seed=8019
+# DiffSketcher + style transfer
+python svg_render.py x=stylediffsketcher "prompt='The French Revolution. highly detailed. 8k. ornate. intricate. cinematic. dehazed. atmospheric. oil painting. by Van Gogh'" x.token_ind=4 x.num_paths=2000 target='./data/starry.png' seed=876809
+```
+
+**Word-As-Image** follows a text prompt to style a letter in a word:
+
+```shell
+# Inject the meaning of the word bunny into the 'Y' in the word 'BUNNY'
+python svg_render.py x=wordasimage x.word='BUNNY' prompt='BUNNY' x.optim_letter='Y'
+```
+
+### 2. SDS Loss-based Approach
+
+These methods use a pretrained text-to-image diffusion model as a strong image prior to supervise the differentiable
+rasterizer (PyDiffVG), so that the rendered SVG aligns with the text prompt. This capability is grounded in
+Score Distillation Sampling (SDS): SDS is the core mechanism that lifts the raster prior of the diffusion model into
+the SVG domain, enabling SVG parameters to be trained without any reference image.
+This covers the methods VectorFusion, DiffSketcher and SVGDreamer.
+
+Here we compare only the SDS-style losses, with no other loss terms enabled (a schematic sketch of the SDS update is
+given after the commands below):
+
+```shell
+# SDS loss
+python svg_render.py x=vectorfusion "prompt='a panda rowing a boat in a pond. minimal flat 2d vector icon. lineal color. 
trending on artstation.'" +# Input Augmentation SDS loss (LSDS loss) +python svg_render.py x=vectorfusion x.style='sketch' "prompt='an elephant. minimal 2d line drawing. trending on artstation.'" +# Input Augmentation SDS loss (ASDS loss) +python svg_render.py x=diffsketcher "prompt='an elephant. minimal 2d line drawing. trending on artstation.'" x.token_ind=2 x.sds.grad_scale=1 x.sds.num_aug=4 x.clip.vis_loss=0 x.perceptual.coeff=0 x.opacity_delta=0.3 +# Vectorized Particle-based Score Distillation (VPSD loss) +python svg_render.py x=svgdreamer "prompt='a panda rowing a boat in a pond. minimal flat 2d vector icon. lineal color. trending on artstation.'" save_step=60 x.guidance.n_particle=6 x.guidance.vsd_n_particle=4 x.guidance.phi_n_particle=2 +``` + +
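+
+For intuition, here is a schematic, unofficial sketch of one SDS update applied to SVG parameters. It is a
+simplification under stated assumptions: `rasterize`, `encode_latents` and `unet_eps` are hypothetical stand-ins for
+the differentiable rasterizer, the VAE encoder and the guided U-Net noise prediction, not the actual
+PyTorch-SVGRender API:
+
+```python
+import torch
+
+def sds_step(svg_params, prompt_embeds, rasterize, encode_latents, unet_eps,
+             alphas_cumprod, guidance_scale=100.0, t_range=(0.05, 0.95)):
+    # 1) differentiably rasterize the current SVG parameters to an RGB image
+    img = rasterize(svg_params)                          # (1, 3, H, W), requires grad
+    latents = encode_latents(img)                        # latent-space image (Stable Diffusion)
+
+    # 2) sample a timestep in t_range and add noise
+    T = alphas_cumprod.shape[0]
+    t = torch.randint(int(t_range[0] * T), int(t_range[1] * T), (1,))
+    noise = torch.randn_like(latents)
+    a_t = alphas_cumprod[t].view(-1, 1, 1, 1)
+    noisy_latents = a_t.sqrt() * latents + (1 - a_t).sqrt() * noise
+
+    # 3) classifier-free guided noise prediction; no gradient flows through the U-Net
+    with torch.no_grad():
+        eps_text, eps_uncond = unet_eps(noisy_latents, t, prompt_embeds)
+        eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)
+
+    # 4) SDS: use w(t) * (eps - noise) as the gradient w.r.t. the latents and
+    #    backpropagate it through the rasterizer into the SVG parameters
+    grad = (1 - a_t) * (eps - noise)
+    surrogate = (grad.detach() * latents).sum()
+    surrogate.backward()                                 # fills svg_params.grad
+```
+
+Roughly speaking, the LSDS/ASDS variants above additionally augment the rasterized input before this update, while
+VPSD replaces the fixed Gaussian-noise target with the prediction of an additionally fine-tuned (LoRA) network.
+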

FAQ

+

▴ Back to top

+
+- Q: Where can I get more scripts and visualizations?
+- A: Check the docs at [pytorch-svgrender.readthedocs.io](https://pytorch-svgrender.readthedocs.io/en/latest/index.html).
+
+- Q: An error says HuggingFace cannot find the model in the disk cache.
+- A: Add *`diffuser.download=True`* to the command to download the model checkpoints the **first time** you run the script.
+

TODO

+

▴ Back to top

+ +- [x] integrated SVGDreamer. + +

Acknowledgement

+

▴ Back to top

+
+The project is built on top of the following repositories:
+
+[BachiLi/diffvg](https://github.com/BachiLi/diffvg),
+[huggingface/diffusers](https://github.com/huggingface/diffusers),
+[threestudio-project/threestudio](https://github.com/threestudio-project/threestudio),
+[yael-vinker/CLIPasso](https://github.com/yael-vinker/CLIPasso),
+[ximinng/DiffSketcher](https://github.com/ximinng/DiffSketcher),
+[THUDM/ImageReward](https://github.com/THUDM/ImageReward),
+[advimman/lama](https://github.com/advimman/lama)
+
+We gratefully thank the authors for their wonderful work.
+

Citation

+

▴ Back to top

+ +If you use this code for your research, please cite the following work: + +``` +@article{xing2023svgdreamer, + title={SVGDreamer: Text Guided SVG Generation with Diffusion Model}, + author={Xing, Ximing and Zhou, Haitao and Wang, Chuang and Zhang, Jing and Xu, Dong and Yu, Qian}, + journal={arXiv preprint arXiv:2312.16476}, + year={2023} +} +@inproceedings{xing2023diffsketcher, + title={DiffSketcher: Text Guided Vector Sketch Synthesis through Latent Diffusion Models}, + author={XiMing Xing and Chuang Wang and Haitao Zhou and Jing Zhang and Qian Yu and Dong Xu}, + booktitle={Thirty-seventh Conference on Neural Information Processing Systems (NeurIPS)}, + year={2023}, + url={https://openreview.net/forum?id=CY1xatvEQj} +} +``` + +

Licence

+

▴ Back to top

+ +This work is licensed under a **Mozilla Public License Version 2.0**. \ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..f7d745e8d737eae1f2a7fb712f2ea66d3b4f9bde --- /dev/null +++ b/app.py @@ -0,0 +1,83 @@ +import os +import subprocess +import sys +import tempfile + +import gradio as gr +from PIL import Image + +sys.path.append('/home/user/app/code') + +# set up diffvg +# os.system('git clone https://github.com/BachiLi/diffvg.git') +os.chdir('diffvg') +os.system('git submodule update --init --recursive') +os.system('python setup.py install --user') +sys.path.append("/home/user/.local/lib/python3.10/site-packages/diffvg-0.0.1-py3.10-linux-x86_64.egg") +print("diffvg installed.") +os.chdir('/home/user/app') + + +def process_images(prompt, num_paths, token_index, seed, optimize_width=False, optimize_color=False): + with tempfile.TemporaryDirectory() as tmpdirname: + command = [ + "python", "svg_render.py", + "x=diffsketcher", + f"prompt={prompt}", + f"x.num_paths={num_paths}", + f"x.token_ind={token_index}", + f"seed={seed}", + f"x.optim_width={optimize_width}", + f"x.optim_rgba={optimize_color}", + "x.optim_opacity=False", + ] + result = subprocess.run(command, check=True) + if result.returncode == 0: + output_image = Image.open(os.path.join(tmpdirname, "final_render.png")) + return output_image + + +with gr.Blocks() as demo: + gr.Markdown("# DiffSketcher") + gr.Markdown("DiffSketcher synthesizes **vector sketches** based on **text prompts**.") + li = [ + "https://raw.githubusercontent.com/ximinng/DiffSketcher/main/img/cat.svg", + "https://raw.githubusercontent.com/ximinng/DiffSketcher/main/img/rose.svg", + "https://raw.githubusercontent.com/ximinng/DiffSketcher/main/img/elephant.svg", + "https://raw.githubusercontent.com/ximinng/DiffSketcher/main/img/elephant_silhouette.svg", + "https://raw.githubusercontent.com/ximinng/DiffSketcher/main/img/horse_width.svg", + "https://raw.githubusercontent.com/ximinng/DiffSketcher/main/img/horse_rgba.svg", + "https://ximinng.github.io/PyTorch-SVGRender-project/assets/diffsketcher/Sydney_opera.svg", + "https://ximinng.github.io/PyTorch-SVGRender-project/assets/diffsketcher/Sydney_opera_width.svg", + "https://ximinng.github.io/PyTorch-SVGRender-project/assets/diffsketcher/Sydney_opera_width_color.svg", + ] + gr.Gallery(li, columns=6) + with gr.Row(): + with gr.Column(): + text = gr.Textbox(label="prompt") + num_paths = gr.Slider(label="path number", value=96, minimum=1, maximum=500, step=1) + token_index = gr.Textbox(label="token_index", info="CLIP embedding token index. Starting from 1.") + seed = gr.Slider(0, 10000, label="random seed", value=8019) + with gr.Accordion("Selectable Inputs"): + optimize_width = gr.Checkbox(label="optimize stroke width") + optimize_color = gr.Checkbox(label="optimize stroke color") + btn = gr.Button("Synthesize") + with gr.Column(): + output = gr.Image(label="output image", height=512) + btn.click(process_images, + inputs=[text, num_paths, token_index, seed, optimize_width, optimize_color], + outputs=[output]) + gr.Markdown("## Examples") + gr.Markdown("Here are some config examples. 
Feel free to try your own prompts!") + gr.Examples( + inputs=[text, num_paths, token_index, seed, optimize_width, optimize_color], + outputs=[output], + fn=process_images, + examples=[ + ["A photo of Sydney opera house.", 96, 5, 8019, False, False], + ["A photo of Sydney opera house.", 96, 5, 8019, True, False], + ["A photo of Sydney opera house.", 128, 5, 8019, True, True], + ], + ) + +demo.launch() diff --git a/assets/fonts/Bell-MT.ttf b/assets/fonts/Bell-MT.ttf new file mode 100644 index 0000000000000000000000000000000000000000..3f426758c482747f1ea1573eb1df315b37e49618 Binary files /dev/null and b/assets/fonts/Bell-MT.ttf differ diff --git a/assets/fonts/DeliusUnicase-Regular.ttf b/assets/fonts/DeliusUnicase-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..96ef9f495123eb1bf220d0f3eccd7277f8195054 Binary files /dev/null and b/assets/fonts/DeliusUnicase-Regular.ttf differ diff --git a/assets/fonts/HobeauxRococeaux-Sherman.ttf b/assets/fonts/HobeauxRococeaux-Sherman.ttf new file mode 100644 index 0000000000000000000000000000000000000000..0d3c958fb3e11b099028cb0f3d7b32eff11ed3f3 Binary files /dev/null and b/assets/fonts/HobeauxRococeaux-Sherman.ttf differ diff --git a/assets/fonts/IndieFlower-Regular.ttf b/assets/fonts/IndieFlower-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..3774ef55d4dd8d0d272f602542bbbf444ebbbb23 Binary files /dev/null and b/assets/fonts/IndieFlower-Regular.ttf differ diff --git a/assets/fonts/JosefinSans-Light.ttf b/assets/fonts/JosefinSans-Light.ttf new file mode 100644 index 0000000000000000000000000000000000000000..33ae128dc1b23da5321e3c711c36e3c88b1a668e Binary files /dev/null and b/assets/fonts/JosefinSans-Light.ttf differ diff --git a/assets/fonts/KaushanScript-Regular.ttf b/assets/fonts/KaushanScript-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..bcda31429ff79f39b82283912cd26628f0c257cc Binary files /dev/null and b/assets/fonts/KaushanScript-Regular.ttf differ diff --git a/assets/fonts/LuckiestGuy-Regular.ttf b/assets/fonts/LuckiestGuy-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..02c71fafc49d4c13c844ec945ad9d4993d2eabc9 Binary files /dev/null and b/assets/fonts/LuckiestGuy-Regular.ttf differ diff --git a/assets/fonts/Noteworthy-Bold.ttf b/assets/fonts/Noteworthy-Bold.ttf new file mode 100644 index 0000000000000000000000000000000000000000..2ad4e118fe288de23df75c8dd6c802f0461aab2c Binary files /dev/null and b/assets/fonts/Noteworthy-Bold.ttf differ diff --git a/assets/fonts/Quicksand.ttf b/assets/fonts/Quicksand.ttf new file mode 100644 index 0000000000000000000000000000000000000000..0ec221996683fb1820d5515172f71243731d0e2b Binary files /dev/null and b/assets/fonts/Quicksand.ttf differ diff --git a/assets/fonts/Saira-Regular.ttf b/assets/fonts/Saira-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..315c0f31af395af5de6c2cff2495687c2d913542 Binary files /dev/null and b/assets/fonts/Saira-Regular.ttf differ diff --git a/checkpoint/placeholder.md b/checkpoint/placeholder.md new file mode 100644 index 0000000000000000000000000000000000000000..044642894463ef7c928643a09db0d6252e472b52 --- /dev/null +++ b/checkpoint/placeholder.md @@ -0,0 +1 @@ +**place model here** \ No newline at end of file diff --git a/conf/config.yaml b/conf/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..69aa62377701e321f80c129c4abfa841707b16ef --- /dev/null +++ b/conf/config.yaml @@ -0,0 +1,56 @@ 
+#-----------------# +# Global Config # +#-----------------# + +# optional args +target: ~ +prompt: ~ +neg_prompt: ~ # negative prompt + +# Accelerate config +state: + cpu: False # use cpu + mprec: no # mixed precision, choices: 'no', 'fp16', 'bf16' +# wandb: False +# tensorboard: False + +# Diffusers config +diffuser: + download: True # Set this variable to True the first time it runs + force_download: False + resume_download: False + +# PyDiffVG config +diffvg: + print_timing: False + +# reproduction +seed: 951222 +# multi-run +multirun: False +srange: ~ # seed range, example: [100, 100] + +# log +result_path: './workspace' +save_step: 10 +eval_step: 10 + +# visual rendering process +mv: False # make video +framefreq: 5 # save the image interval +framerate: 24 # by adjusting the frame rate, you can control the playback speed of the output video + +# hydra setting +hydra: + help: + # app name, override to match the name your app is known by + app_name: 'SVGRender' + run: + # output directory for normal runs + # warning: make sure that the L56-58 of '/libs/engine/model_state.py' and 'dir' are modified together + dir: ./${result_path}/${x.method}-${now:%Y-%m-%d-%H-%M} + +# default settings +defaults: + - _self_ + - x: ~ \ No newline at end of file diff --git a/conf/x/clipascene.yaml b/conf/x/clipascene.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f9554a73d216db71a0f523cbd1dd18412b5e6a31 --- /dev/null +++ b/conf/x/clipascene.yaml @@ -0,0 +1,87 @@ +method: 'clipascene' + +im_name: "" +image_size: 224 +u2net_path: "./checkpoint/u2net/u2net.pth" + +background_layer: 2 # 2, 8, 11 +background_div: 0.35 # 0.35, 0.5, 0.85 +background_num_iter: 1501 + +foreground_layer: 2 # 2, 8, 11 +foreground_div: 0.4 # 0.4, 0.5, 0.9 +foreground_num_iter: 600 # 1000 if foreground_layer >= 8 else 600 + +# general +target: null +output_dir: null +path_svg: "none" +mask_object: 0 +resize_obj: 0 +fix_scale: 0 +display_logs: 0 +display: 0 +test_name: "test" + +# training +num_iter: 2001 +num_stages: 1 +lr_scheduler: 0 +lr: 0.0001 +color_lr: 0.01 +width_lr: 0.0001 +color_vars_threshold: 0.0 +batch_size: 1 +save_step: 100 +eval_step: 20 +loss_mask: "none" +dilated_mask: 0 +mask_cls: None +mask_attention: 0 + +# strokes params +num_paths: 64 +width: 1.5 +control_points_per_seg: 4 +num_segments: 1 +attention_init: 1 +saliency_model: "clip" +saliency_clip_model: "ViT-B/32" +xdog_intersec: 1 +mask_object_attention: 0 +softmax_temp: 0.3 +mlp_train: 1 +width_optim: 0 +mlp_width_weights_path: "none" +mlp_points_weights_path: "none" +switch_loss: 0 +gumbel_temp: 0.2 +width_loss_weight: 0 +width_loss_type: "L1" +optimize_points: 1 +load_points_opt_weights: 0 +gradnorm: 0 +width_weights_lst: "" +ratio_loss: 0 + +# loss +percep_loss: "none" +perceptual_weight: 0 +train_with_clip: 0 +clip_weight: 0 +start_clip: 0 +num_aug_clip: 4 +include_target_in_aug: 0 +augment_both: 1 +augemntations: "affine" +noise_thresh: 0.5 +aug_scale_min: 0.7 +force_sparse: 0 +clip_conv_loss: 1 +clip_mask_loss: 0 +clip_conv_loss_type: "L2" +clip_conv_layer_weights: "0,0,1.0,1.0,0" +clip_model_name: "ViT-B/32" +clip_fc_loss_weight: 0 +clip_text_guide: 0 +text_target: None diff --git a/conf/x/clipasso.yaml b/conf/x/clipasso.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0367c9c82bdba59f7fcdab6e59b328ec949f7fd4 --- /dev/null +++ b/conf/x/clipasso.yaml @@ -0,0 +1,48 @@ +method: 'clipasso' + +image_size: 224 +mask_object: False +fix_scale: False +path_svg: ~ # if you want to load a svg file and train from 
it + +# train +num_iter: 2001 +num_stages: 1 # training stages, you can train x strokes, then freeze them and train another x strokes etc +lr_schedule: False +lr: 1 +color_lr: 0.01 +color_vars_threshold: 0.0 + +# SVG path attr +num_paths: 24 # number of strokes +width: 1.5 # stroke width +control_points_per_seg: 4 +num_segments: 1 +attention_init: 1 # if True, use the attention heads of Dino model to set the location of the initial strokes +saliency_model: "clip" +saliency_clip_model: "ViT-B/32" +xdog_intersec: 1 +mask_object_attention: 0 +softmax_temp: 0.3 +u2net_path: "./checkpoint/u2net/u2net.pth" + +# loss +percep_loss: "none" +perceptual_weight: 0 +train_with_clip: 0 +clip_weight: 0 +start_clip: 0 +num_aug_clip: 4 +include_target_in_aug: 0 +augment_both: 0 +augemntations: "affine" # can be any combination of: 'affine_noise_eraserchunks_eraser_press' +noise_thresh: 0.5 +aug_scale_min: 0.7 +force_sparse: 0 # if True, use L1 regularization on stroke's opacity to encourage small number of strokes +clip_conv_loss: 1 +clip_conv_loss_type: "L2" +clip_conv_layer_weights: "0,0,1.0,1.0,0" +clip_model_name: "RN101" +clip_fc_loss_weight: 0.1 +clip_text_guide: 0 +text_target: None \ No newline at end of file diff --git a/conf/x/clipdraw.yaml b/conf/x/clipdraw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8e376efc8963726daa9f8111ff50c61feb155bc1 --- /dev/null +++ b/conf/x/clipdraw.yaml @@ -0,0 +1,20 @@ +method: 'clipdraw' + +image_size: 224 # canvas size +path_svg: ~ # if you want to load a svg file and train from it + +# train +num_iter: 1000 +num_stages: 1 # training stages, you can train x strokes, then freeze them and train another x strokes etc +lr_schedule: True +lr: 1 +width_lr: 0.1 +color_lr: 0.01 + +# SVG path attr +num_paths: 512 # number of strokes +max_width: 50 # stroke width +black_stroke_color: False + +# loss +num_aug: 4 \ No newline at end of file diff --git a/conf/x/clipfont.yaml b/conf/x/clipfont.yaml new file mode 100644 index 0000000000000000000000000000000000000000..774456bb38e34e4bdd541b28d6eb4475fd323323 --- /dev/null +++ b/conf/x/clipfont.yaml @@ -0,0 +1,27 @@ +method: 'clipfont' + +# optimizer +lr_base: + point: 0.1 + color: 0.01 +lr_decay_rate: 0.1 +decay_steps: [ 1000, 1500 ] +lr_schedule: False + +# train +num_iter: 200 +batch_size: 1 +font: + reinit: False + reinit_color: 'randn' # 'randn', 'randn_all', 'green' et al + +# loss +clip: + model_name: "ViT-B/32" # RN101, 'ViT-B/32', ViT-L/14 +thresh: 0.0 +num_crops: 128 +crop_size: 230 +lam_patch: 150 +lam_dir: 30 +lam_lpips: 0 +lam_l2: 0 diff --git a/conf/x/diffsketcher.yaml b/conf/x/diffsketcher.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26c6fd69a71f29e3bc7a8fc3281a7a20a9f5137c --- /dev/null +++ b/conf/x/diffsketcher.yaml @@ -0,0 +1,76 @@ +method: 'diffsketcher' + +image_size: 224 # canvas size +path_svg: ~ # if you want to load a svg file and train from it +mask_object: False # if the target image contains background, it's better to mask it out +fix_scale: False # if the target image is not squared, it is recommended to fix the scale + +# train +num_iter: 2000 +num_stages: 1 # training stages, you can train x strokes, then freeze them and train another x strokes etc +lr_schedule: False +lr_decay_rate: 0.1 +decay_steps: [ 1000, 1500 ] +lr: 1 +color_lr: 0.01 +color_vars_threshold: 0.0 # uncomment the code +width_lr: 0.1 +max_width: 50 # stroke width + +# stroke attrs +num_paths: 128 # number of strokes +width: 1.5 # stroke width +control_points_per_seg: 4 
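+# note: with 4 control points per segment, each stroke segment is a cubic Bézier curve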
+num_segments: 1 +optim_opacity: True # if True, the stroke opacity is optimized +optim_width: False # if True, the stroke width is optimized +optim_rgba: False # if True, the stroke RGBA is optimized +opacity_delta: 0 # stroke pruning + +# init strokes +attention_init: True # if True, use the attention heads of Dino model to set the location of the initial strokes +xdog_intersec: True # initialize along the edge, mix XDoG and attn up +softmax_temp: 0.5 +cross_attn_res: 16 +self_attn_res: 32 +max_com: 20 +mean_comp: False +comp_idx: 0 +attn_coeff: 1.0 # attn fusion, w * cross-attn + (1-w) * self-attn +log_cross_attn: False # True if cross attn every step +u2net_path: "./checkpoint/u2net/u2net.pth" + +# ldm +model_id: "sd15" +ldm_speed_up: False +enable_xformers: True +gradient_checkpoint: False +token_ind: 5 +use_ddim: True +num_inference_steps: 100 +guidance_scale: 7.5 # sdxl default 5.0 + +# ASDS loss +sds: + crop_size: 512 + augmentations: "affine" + guidance_scale: 100 + grad_scale: 1e-6 + t_range: [ 0.05, 0.95 ] + warmup: 2000 + +clip: + model_name: "RN101" # RN101, ViT-L/14 + feats_loss_type: "l2" # clip visual loss type, conv layers + feats_loss_weights: [ 0,0,1.0,1.0,0 ] # RN based + # feats_loss_weights: [ 0,0,1.0,1.0,0,0,0,0,0,0,0,0 ] # ViT based + fc_loss_weight: 0.1 # clip visual loss, fc layer weight + augmentations: "affine" # augmentation before clip visual computation + num_aug: 4 # num of augmentation before clip visual computation + vis_loss: 1 # 1 or 0 for use or disable clip visual loss + text_visual_coeff: 0 # cosine similarity between text and img + +perceptual: + name: "lpips" # dists + lpips_net: 'vgg' + coeff: 0.2 \ No newline at end of file diff --git a/conf/x/diffvg.yaml b/conf/x/diffvg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ca9a06f80bb231e0b214d05508cfdef4dfbc76a7 --- /dev/null +++ b/conf/x/diffvg.yaml @@ -0,0 +1,18 @@ +method: 'diffvg' + +# train +num_iter: 2000 # num_iter +lr_base: + point: 1 + color: 0.01 + stroke_width: 0.1 + stroke_color: 0.01 +lr_schedule: False # use lr_schedule + +# SVG path attr +num_paths: 512 # number of paths +max_width: 5.0 # maximum width +path_type: 'unclosed' # or 'closed', using Closed curve or non-closed curve + +# loss +loss_type: 'l2' # or 'l1', 'l2', 'lpips', 'l2+lpips', loss type diff --git a/conf/x/live.yaml b/conf/x/live.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f668fbfa50d3c0bbe540b858f2af86ac66b16e7e --- /dev/null +++ b/conf/x/live.yaml @@ -0,0 +1,31 @@ +method: 'live' + +image_size: 240 # img size and canvas size + +# train +num_iter: 500 # num_iter per path group +num_stages: 1 # training stages, you can train x strokes, then freeze them and train another x strokes etc +lr_base: + point: 1 + color: 0.01 + bg: 0.01 + stroke_width: 0.1 + stroke_color: 0.01 +lr_schedule: True # use lr_schedule + +# SVG path attr +num_paths: 5 # number of strokes +path_schedule: 'repeat' +schedule_each: 1 # [1, 3, 5, 7] +train_stroke: False # train stroke width and color +trainable_bg: False # set the background to be trainable +width: 3 # stroke width +num_segments: 4 +segment_init: 'circle' # 'random' +radius: 5 +coord_init: 'sparse' # 'random', 'naive', place the first control point + +# loss +use_l1_loss: False +use_distance_weighted_loss: True +xing_loss_weight: 0.01 diff --git a/conf/x/styleclipdraw.yaml b/conf/x/styleclipdraw.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c2c23e20272375e3f390cc7abeab562b940e21c --- /dev/null +++ 
b/conf/x/styleclipdraw.yaml @@ -0,0 +1,21 @@ +method: 'styleclipdraw' + +image_size: 224 # canvas size +path_svg: ~ # if you want to load an svg file and train from it + +# train +num_iter: 1000 +num_stages: 1 # training stages, you can train x strokes, then freeze them and train another x strokes etc +lr_schedule: True # anneal learning rate +lr: 1 +width_lr: 0.1 +color_lr: 0.01 + +# strokes +num_paths: 512 # number of strokes +max_width: 50 # stroke width +black_stroke_color: False +style_strength: 50 # How strong the style should be. 100 (max) is a lot. 0 (min) is no style. + +# loss +num_aug: 10 # Number of image augmentations \ No newline at end of file diff --git a/conf/x/stylediffsketcher.yaml b/conf/x/stylediffsketcher.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2a75e7c7ce9a8acb7c4d0518bf729e44a769e660 --- /dev/null +++ b/conf/x/stylediffsketcher.yaml @@ -0,0 +1,77 @@ +method: 'stylediffsketcher' + +image_size: 224 # canvas size +path_svg: ~ # if you want to load a svg file and train from it +mask_object: False # if the target image contains background, it's better to mask it out +fix_scale: False # if the target image is not squared, it is recommended to fix the scale + +# train +num_iter: 2000 +num_stages: 1 # training stages, you can train x strokes, then freeze them and train another x strokes etc +lr_schedule: False +lr_decay_rate: 0.1 +decay_steps: [ 1000, 1500 ] +lr: 1 +color_lr: 0.01 +color_vars_threshold: 0.0 # uncomment the code +width_lr: 0.1 +max_width: 50 # stroke width + +# SVG path attrs +num_paths: 512 # number of strokes +width: 1.5 # init stroke width +control_points_per_seg: 4 +num_segments: 1 +optim_opacity: True # if True, the stroke opacity is optimized +optim_width: True # if True, the stroke width is optimized +optim_rgba: True # if True, the stroke RGBA is optimized + +# init strokes +attention_init: True # if True, use the attention heads of Dino model to set the location of the initial strokes +xdog_intersec: True # initialize along the edge, mix XDoG and attn up +softmax_temp: 0.4 +cross_attn_res: 16 +self_attn_res: 32 +max_com: 20 +mean_comp: False +comp_idx: 0 +attn_coeff: 1.0 # attn fusion, w * cross-attn + (1-w) * self-attn +log_cross_attn: False +u2net_path: "./checkpoint/u2net/u2net.pth" + +# ldm +model_id: "sd15" +ldm_speed_up: False +enable_xformers: True +gradient_checkpoint: False +token_ind: 5 +use_ddim: True +num_inference_steps: 100 +guidance_scale: 7.5 + +# ASDS loss +sds: + crop_size: 512 + augmentations: "affine" + guidance_scale: 100 + grad_scale: 0 + t_range: [ 0.05, 0.95 ] + warmup: 120 + +clip: + model_name: "RN101" # RN101, ViT-L/14 + feats_loss_type: "l2" # clip visual loss type, conv layers + feats_loss_weights: [ 0,0,1.0,1.0,0 ] # RN based + # feats_loss_weights: [ 0,0,1.0,1.0,0,0,0,0,0,0,0,0 ] # ViT based + fc_loss_weight: 0.1 # clip visual loss, fc layer weight + augmentations: "affine_norm" # augmentation before clip visual computation, affine_norm_trivial + num_aug: 4 # num of augmentation before clip visual computation + vis_loss: 1 # 1 or 0 for use or disable clip visual loss + text_visual_coeff: 0 # cosine similarity between text and img + +perceptual: + name: "lpips" # dists + lpips_net: 'vgg' + coeff: 0.2 + +style_strength: 1 # How strong the style should be. 100 (max) is a lot. 0 (min) is no style. 
\ No newline at end of file diff --git a/conf/x/svgdreamer.yaml b/conf/x/svgdreamer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8c554f62e28043e28cf77e96b41be6040cf17a73 --- /dev/null +++ b/conf/x/svgdreamer.yaml @@ -0,0 +1,122 @@ +method: "svgdreamer" + +image_size: 600 # canvas size +path_svg: ~ # if you want to load a svg file and train from it +num_stages: 1 # training stages, you can train x strokes, then freeze them and train another x strokes etc +skip_sive: True # optimize from scratch without SIVE init +color_init: 'rand' # if skip_live=True, then use color_init to init target_img +style: "iconography" # "iconography", "pixelart", "low-poly", "painting", "sketch", "ink" + +# lr and optim +lr_stage_one: # SIVE stage + point: 1 # control points + width: 0.1 # stroke width + color: 0.01 # fill color and stroke color + bg: 0.01 # bg in render_warp + optim: + name: 'adam' + betas: [ 0.9, 0.9 ] + eps: 1e-6 + lr_schedule: True # use lr_scheduler + schedule: + name: 'linear' + keep_ratio: 0.2 + decay_ratio: 0.4 +lr_stage_two: # VPSD stage + point: 1 + width: 0.1 + color: 0.01 + bg: 0.01 + lr_schedule: True # use lr_scheduler + optim: + name: 'adam' + betas: [ 0.9, 0.9 ] + eps: 1e-6 + schedule: + name: 'cosine' + warmup_steps: 10 + warmup_start_lr: 0.02 + warmup_end_lr: 0.8 + cosine_end_lr: 0.4 + +# primitives +num_paths: 256 # number of strokes +trainable_bg: False # set the background to be trainable +width: 3 # stroke width +num_segments: 4 +segment_init: 'circle' # 'random' +radius: 20 +coord_init: 'random' # 'random', 'naive', place the first control point +grid: 50 # divide the canvas into n grids +path_reinit: # reinitializing paths + use: True + freq: 100 # every 50 iterations + stop_step: 1000 # for VPSD fine-tuning + opacity_threshold: 0.05 + area_threshold: 64 + +# diffusion +model_id: "sd21b" # sd14, sd15, sd21, sd21b, sdxl +ldm_speed_up: False +enable_xformers: True +gradient_checkpoint: False +cpu_offload: True +num_inference_steps: 50 +guidance_scale: 7.5 # sdxl default 5.0 +K: 4 +lora_path: ~ + +# VPSD loss +guidance: + use: True + type: 'vpsd' + n_particle: 1 # 4, 8, 16 + vsd_n_particle: 1 # the batch size of particles + particle_aug: False # do data enhancement for the input particles + num_iter: 2000 # total iterations + guidance_scale: 7.5 # CFG value + grad_scale: 1.0 # increase or decrease the gradient + grad_clip_val: ~ # eg: 10, clip the gradient of VPSD + t_range: [ 0.02, 0.98 ] + # 'randint': random time steps, this may have a more authentic style. + # 'max_0.5_900': annealing from 0.98 to 0.5 after 900 steps, this may have a more colorful results. 
+ t_schedule: 'max_0.5_1000' # or 'randint' + # phi model config + phi_single: False # if False new an unet model to estimate noise + phi_model: 'lora' # 'lora', 'unet_simple' + use_attn_scale: ${x.guidance.phi_single} # use lora_attn_scale or not + lora_attn_scale: 1.0 # the scale of the attn based lora layer + phi_guidance_scale: 1.0 + phi_t: False # different t for phi fine-tuning + phi_update_step: 1 # enable multi-update phi model or not + phi_lr: 0.0001 # learning rate of phi model + phi_scheduler: 'ddim' # 'dpm-solver' + phi_n_particle: 1 # the batch size of phi_model + # ReFL config + phi_ReFL: False # enable reward feed back learning + n_phi_sample: 1 # number of samples used in ReFL + phi_sample_step: 200 # the phi log step + phi_infer_step: 50 # the phi num_inference_steps + # phi model optim + phi_optim: + name: 'adamw' + betas: [ 0.9, 0.999 ] + eps: 1e-8 + weight_decay: ~ # 1e-5 + # phi model lr learning schedule + phi_schedule: + use: False + name: 'cosine' + warmup_steps: 50 + warmup_start_lr: 0.00001 + warmup_end_lr: 0.0001 + total_step: 800 + cosine_end_lr: 0.0001 + +# reward model +reward_path: './checkpoint/ImageReward' + +# xing loss for closed-form paths +xing_loss: + use: False + weight: 0.01 diff --git a/conf/x/vectorfusion.yaml b/conf/x/vectorfusion.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9186cbc7f540e87a8b729456368465d12fb4db9c --- /dev/null +++ b/conf/x/vectorfusion.yaml @@ -0,0 +1,85 @@ +method: "vectorfusion" + +image_size: 600 # canvas size +path_svg: ~ # if you want to load a svg file and train from it +num_stages: 1 # training stages, you can train x strokes, then freeze them and train another x strokes etc +skip_live: False # if skip_live then training from scratch +style: "iconography" # "iconography", "pixelart", "low-poly", "painting", "sketch", "ink" + +# train +batch_size: 1 +num_iter: 500 # num_iter per path group +# lr and optim +lr_stage_one: + point: 1 + width: 0.1 + color: 0.01 + bg: 0.01 + optim: + name: 'adam' + betas: [ 0.9, 0.9 ] + eps: 1e-6 + lr_schedule: True # use lr_scheduler + schedule: + name: 'linear' + keep_ratio: 0.2 + decay_ratio: 0.4 +lr_stage_two: + point: 1 + width: 0.1 + color: 0.01 + bg: 0.01 + lr_schedule: True # use lr_scheduler + optim: + name: 'adam' + betas: [ 0.9, 0.9 ] + eps: 1e-6 + schedule: + name: 'cosine' + warmup_steps: 50 + warmup_start_lr: 0.02 + warmup_end_lr: 1.0 + cosine_end_lr: 0.4 + +# primitives +num_paths: 128 # number of strokes +path_schedule: 'repeat' # 'list' +schedule_each: 16 # [1, 3, 5, 7] +trainable_bg: False # set the background to be trainable +width: 3 # stroke width +num_segments: 4 +segment_init: 'circle' # 'random' +radius: 20 +coord_init: 'sparse' # 'random', 'naive', place the first control point +grid: 32 # divide the canvas into n grids +path_reinit: # reinitializing paths + use: True + freq: 50 # every 50 iterations + stop_step: 800 # for SDS fine-tuning + opacity_threshold: 0.05 + area_threshold: 64 + +# diffusion +model_id: "sd15" # sd14, sd15, sd21, sd21b, sdxl +ldm_speed_up: False +enable_xformers: True +gradient_checkpoint: False +cpu_offload: True +num_inference_steps: 50 +guidance_scale: 7.5 # sdxl default 5.0 +K: 6 +lora_path: ~ + +# SDS +sds: + im_size: 512 + guidance_scale: 100 + grad_scale: 1.0 + t_range: [ 0.05, 0.95 ] + num_iter: 1000 # fine-tuning steps + +# Live loss +use_distance_weighted_loss: True +xing_loss_weight: 0.01 +# pixel loss +penalty_weight: 0.05 \ No newline at end of file diff --git a/conf/x/wordasimage.yaml 
b/conf/x/wordasimage.yaml new file mode 100644 index 0000000000000000000000000000000000000000..518dfa6f1bc7b66a702edd78b428f7273f4b58d6 --- /dev/null +++ b/conf/x/wordasimage.yaml @@ -0,0 +1,46 @@ +method: "wordasimage" + +image_size: 600 # canvas size +word: "BUNNY" +optim_letter: "Y" +prompt_suffix: "minimal flat 2d vector. lineal color. trending on artstation" + +# train +num_iter: 500 +lr_schedule: True +lr: + point_lr: 1 + lr_init: 0.002 + lr_final: 0.0008 + lr_delay_mult: 0.1 + lr_delay_steps: 100 + +# font +font: 'KaushanScript-Regular' +font_path: "./assets/fonts/${x.font}.ttf" +level_of_cc: 1 # 0 - original number of cc / 1 - recommended / 2 - more control points + +# diffusion +model_id: "sd15" +ldm_speed_up: False +enable_xformers: False +gradient_checkpoint: False +lora_path: ~ + +# SDS +sds: + im_size: 512 + guidance_scale: 100 + grad_scale: 1.0 + t_range: [ 0.05, 0.95 ] + num_iter: 1000 + +tone_loss: + use: True + dist_loss_weight: 100 + pixel_dist_kernel_blur: 201 + pixel_dist_sigma: 30 + +conformal: + use: True + angeles_w: 0.5 \ No newline at end of file diff --git a/data/alphabet1.svg b/data/alphabet1.svg new file mode 100644 index 0000000000000000000000000000000000000000..b7ba4081e8acd080b17bb817069494707eade0ca --- /dev/null +++ b/data/alphabet1.svg @@ -0,0 +1,726 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/data/ballerina.png b/data/ballerina.png new file mode 100644 index 0000000000000000000000000000000000000000..670d183d260833ec8a82bd8b256aeaa0fd44bfe5 Binary files /dev/null and b/data/ballerina.png differ diff --git a/data/ch1.svg b/data/ch1.svg new file mode 100644 index 0000000000000000000000000000000000000000..258ae103c5b8c92c9233a00e125fa20e49be5efe --- /dev/null +++ b/data/ch1.svg @@ -0,0 +1,1843 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/data/fallingwater.png b/data/fallingwater.png new file mode 100644 index 0000000000000000000000000000000000000000..9abc664d72ba02c9376fb2fc597d5ecfc31b9bd5 Binary files /dev/null and b/data/fallingwater.png differ diff --git a/data/horse.png b/data/horse.png new file mode 100644 index 0000000000000000000000000000000000000000..bbbaf6f1ac8ef659f515ae36e15d61060f38f3ec Binary files /dev/null and b/data/horse.png differ diff --git a/data/simile.png b/data/simile.png new file mode 100644 index 0000000000000000000000000000000000000000..5705c2ff34aa0df1cffe65d5e5be7b41a607224c Binary files /dev/null and b/data/simile.png differ diff --git a/data/starry.png b/data/starry.png new file mode 100644 index 0000000000000000000000000000000000000000..43d49bfc8742ec47d67bb1c94f8685fdccacf3c8 Binary files /dev/null and b/data/starry.png differ diff --git a/diffvg b/diffvg new file mode 160000 index 
0000000000000000000000000000000000000000..6f60468bfdef5b9fec8cc3fa47b441dc2720eefc --- /dev/null +++ b/diffvg @@ -0,0 +1 @@ +Subproject commit 6f60468bfdef5b9fec8cc3fa47b441dc2720eefc diff --git a/lama/.gitignore b/lama/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..6880001dc5adfee501233ea6f8c6082cb5298c81 --- /dev/null +++ b/lama/.gitignore @@ -0,0 +1,137 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# temporary files +## IDEA +.idea/ +## vscode +.vscode/ +## vim +*.sw? diff --git a/lama/LICENSE b/lama/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..ca822bb5f62a37a5a73f56a2d563b16dab46c03f --- /dev/null +++ b/lama/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [2021] Samsung Research + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/lama/README.md b/lama/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ccb93c735a0ff67ade2207dd6c212a9946cdfdd7 --- /dev/null +++ b/lama/README.md @@ -0,0 +1,475 @@ +# 🦙 LaMa: Resolution-robust Large Mask Inpainting with Fourier Convolutions + +Official implementation by Samsung Research + +by Roman Suvorov, Elizaveta Logacheva, Anton Mashikhin, +Anastasia Remizova, Arsenii Ashukha, Aleksei Silvestrov, Naejin Kong, Harshith Goka, Kiwoong Park, Victor Lempitsky. + +

+ 🔥🔥🔥 +
+ +LaMa generalizes surprisingly well to much higher resolutions (~2k❗️) than it saw during training (256x256), and achieves excellent performance even in challenging scenarios, e.g. completion of periodic structures. +

+ +[[Project page](https://saic-mdal.github.io/lama-project/)] [[arXiv](https://arxiv.org/abs/2109.07161)] [[Supplementary](https://ashukha.com/projects/lama_21/lama_supmat_2021.pdf)] [[BibTeX](https://senya-ashukha.github.io/projects/lama_21/paper.txt)] [[Casual GAN Papers Summary](https://www.casualganpapers.com/large-masks-fourier-convolutions-inpainting/LaMa-explained.html)] + +

+ +Try out in Google Colab +
+ +# LaMa development +(Feel free to share your paper by creating an issue) +- Amazing results [paper](https://arxiv.org/abs/2206.13644) / [video](https://www.youtube.com/watch?v=gEukhOheWgE) / code https://github.com/saic-mdal/lama/pull/112 / by Geomagical Labs ([geomagical.com](geomagical.com)) +

+ +

+ +# Non-official 3rd party apps: +(Feel free to share your app/implementation/demo by creating an issue) +- [https://cleanup.pictures](https://cleanup.pictures/) - a simple interactive object removal tool by [@cyrildiagne](https://twitter.com/cyrildiagne) + - [lama-cleaner](https://github.com/Sanster/lama-cleaner) by [@Sanster](https://github.com/Sanster/lama-cleaner) is a self-host version of [https://cleanup.pictures](https://cleanup.pictures/) +- Integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio). See demo: [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/akhaliq/lama) by [@AK391](https://github.com/AK391) +- Telegram bot [@MagicEraserBot](https://t.me/MagicEraserBot) by [@Moldoteck](https://github.com/Moldoteck), [code](https://github.com/Moldoteck/MagicEraser) +- [Auto-LaMa](https://github.com/andy971022/auto-lama) = DE:TR object detection + LaMa inpainting by [@andy971022](https://github.com/andy971022) +- [LAMA-Magic-Eraser-Local](https://github.com/zhaoyun0071/LAMA-Magic-Eraser-Local) = a standalone inpainting application built with PyQt5 by [@zhaoyun0071](https://github.com/zhaoyun0071) +- [Hama](https://www.hama.app/) - object removal with a smart brush which simplifies mask drawing. +- [ModelScope](https://www.modelscope.cn/models/damo/cv_fft_inpainting_lama/summary) = the largest Model Community in Chinese by [@chenbinghui1](https://github.com/chenbinghui1). + +# Environment setup + +Clone the repo: +`git clone https://github.com/saic-mdal/lama.git` + +There are three options of an environment: + +1. Python virtualenv: + + ``` + virtualenv inpenv --python=/usr/bin/python3 + source inpenv/bin/activate + pip install torch==1.8.0 torchvision==0.9.0 + + cd lama + pip install -r requirements.txt + ``` + +2. Conda + + ``` + % Install conda for Linux, for other OS download miniconda at https://docs.conda.io/en/latest/miniconda.html + wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh + bash Miniconda3-latest-Linux-x86_64.sh -b -p $HOME/miniconda + $HOME/miniconda/bin/conda init bash + + cd lama + conda env create -f conda_env.yml + conda activate lama + conda install pytorch torchvision torchaudio cudatoolkit=10.2 -c pytorch -y + pip install pytorch-lightning==1.2.9 + ``` + +3. Docker: No actions are needed 🎉. + +# Inference + +Run +``` +cd lama +export TORCH_HOME=$(pwd) && export PYTHONPATH=$(pwd) +``` + +**1. Download pre-trained models** + +Install tool for yandex disk link extraction: + +``` +pip3 install wldhx.yadisk-direct +``` + +The best model (Places2, Places Challenge): + +```bash +# failed +curl -L $(yadisk-direct https://disk.yandex.ru/d/ouP6l8VJ0HpMZg) -o big-lama.zip +# new link +curl -L $(yadisk-direct https://disk.yandex.ru/d/kHJkc7bs7mKIVA) -o big-lama.zip +unzip big-lama.zip +``` + +All models (Places & CelebA-HQ): + +``` +curl -L $(yadisk-direct https://disk.yandex.ru/d/EgqaSnLohjuzAg) -o lama-models.zip +unzip lama-models.zip +``` + +**2. Prepare images and masks** + +Download test images: + +``` +curl -L $(yadisk-direct https://disk.yandex.ru/d/xKQJZeVRk5vLlQ) -o LaMa_test_images.zip +unzip LaMa_test_images.zip +``` +
+ OR prepare your data: +1) Create masks named as `[images_name]_maskXXX[image_suffix]`, put images and masks in the same folder. + +- You can use the [script](https://github.com/saic-mdal/lama/blob/main/bin/gen_mask_dataset.py) for random masks generation. +- Check the format of the files: + ``` + image1_mask001.png + image1.png + image2_mask001.png + image2.png + ``` + +2) Specify `image_suffix`, e.g. `.png` or `.jpg` or `_input.jpg` in `configs/prediction/default.yaml`. + +
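+
+The naming convention above is easy to get wrong, so below is a minimal sketch (not part of this repository) that flags images without a matching mask before running prediction; the folder name and the default `.png` suffix are assumptions for illustration:
+
+```
+# Illustrative helper, not shipped with LaMa: check that every image in a folder
+# has at least one companion mask named `[image_name]_maskXXX[image_suffix]`.
+import re
+from pathlib import Path
+
+def check_mask_naming(folder, image_suffix=".png"):
+    files = [p.name for p in Path(folder).iterdir() if p.name.endswith(image_suffix)]
+    mask_re = re.compile(r"_mask\d+" + re.escape(image_suffix) + r"$")
+    masks = [f for f in files if mask_re.search(f)]
+    images = [f for f in files if not mask_re.search(f)]
+    for img in images:
+        stem = img[: -len(image_suffix)]
+        if not any(m.startswith(stem + "_mask") for m in masks):
+            print(f"warning: no mask found for {img}")
+
+check_mask_naming("LaMa_test_images", image_suffix=".png")  # folder path is an assumption
+```
+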
+ + +**3. Predict** + +On the host machine: + + python3 bin/predict.py model.path=$(pwd)/big-lama indir=$(pwd)/LaMa_test_images outdir=$(pwd)/output + +**OR** in the docker + +The following command will pull the docker image from Docker Hub and execute the prediction script +``` +bash docker/2_predict.sh $(pwd)/big-lama $(pwd)/LaMa_test_images $(pwd)/output device=cpu +``` +Docker cuda: TODO + +**4. Predict with Refinement** + +On the host machine: + + python3 bin/predict.py refine=True model.path=$(pwd)/big-lama indir=$(pwd)/LaMa_test_images outdir=$(pwd)/output + +# Train and Eval + +⚠️ Warning: The training is not fully tested yet, e.g., did not re-training after refactoring ⚠️ + + +Make sure you run: + +``` +cd lama +export TORCH_HOME=$(pwd) && export PYTHONPATH=$(pwd) +``` + +Then download models for _perceptual loss_: + + mkdir -p ade20k/ade20k-resnet50dilated-ppm_deepsup/ + wget -P ade20k/ade20k-resnet50dilated-ppm_deepsup/ http://sceneparsing.csail.mit.edu/model/pytorch/ade20k-resnet50dilated-ppm_deepsup/encoder_epoch_20.pth + + +## Places + +⚠️ NB: FID/SSIM/LPIPS metric values for Places that we see in LaMa paper are computed on 30000 images that we produce in evaluation section below. +For more details on evaluation data check [[Section 3. Dataset splits in Supplementary](https://ashukha.com/projects/lama_21/lama_supmat_2021.pdf#subsection.3.1)] ⚠️ + +On the host machine: + + # Download data from http://places2.csail.mit.edu/download.html + # Places365-Standard: Train(105GB)/Test(19GB)/Val(2.1GB) from High-resolution images section + wget http://data.csail.mit.edu/places/places365/train_large_places365standard.tar + wget http://data.csail.mit.edu/places/places365/val_large.tar + wget http://data.csail.mit.edu/places/places365/test_large.tar + + # Unpack train/test/val data and create .yaml config for it + bash fetch_data/places_standard_train_prepare.sh + bash fetch_data/places_standard_test_val_prepare.sh + + # Sample images for test and viz at the end of epoch + bash fetch_data/places_standard_test_val_sample.sh + bash fetch_data/places_standard_test_val_gen_masks.sh + + # Run training + python3 bin/train.py -cn lama-fourier location=places_standard + + # To evaluate trained model and report metrics as in our paper + # we need to sample previously unseen 30k images and generate masks for them + bash fetch_data/places_standard_evaluation_prepare_data.sh + + # Infer model on thick/thin/medium masks in 256 and 512 and run evaluation + # like this: + python3 bin/predict.py \ + model.path=$(pwd)/experiments/__lama-fourier_/ \ + indir=$(pwd)/places_standard_dataset/evaluation/random_thick_512/ \ + outdir=$(pwd)/inference/random_thick_512 model.checkpoint=last.ckpt + + python3 bin/evaluate_predicts.py \ + $(pwd)/configs/eval2_gpu.yaml \ + $(pwd)/places_standard_dataset/evaluation/random_thick_512/ \ + $(pwd)/inference/random_thick_512 \ + $(pwd)/inference/random_thick_512_metrics.csv + + + +Docker: TODO + +## CelebA +On the host machine: + + # Make shure you are in lama folder + cd lama + export TORCH_HOME=$(pwd) && export PYTHONPATH=$(pwd) + + # Download CelebA-HQ dataset + # Download data256x256.zip from https://drive.google.com/drive/folders/11Vz0fqHS2rXDb5pprgTjpD7S2BAJhi1P + + # unzip & split into train/test/visualization & create config for it + bash fetch_data/celebahq_dataset_prepare.sh + + # generate masks for test and visual_test at the end of epoch + bash fetch_data/celebahq_gen_masks.sh + + # Run training + python3 bin/train.py -cn lama-fourier-celeba data.batch_size=10 + 
+ # Infer model on thick/thin/medium masks in 256 and run evaluation + # like this: + python3 bin/predict.py \ + model.path=$(pwd)/experiments/__lama-fourier-celeba_/ \ + indir=$(pwd)/celeba-hq-dataset/visual_test_256/random_thick_256/ \ + outdir=$(pwd)/inference/celeba_random_thick_256 model.checkpoint=last.ckpt + + +Docker: TODO + +## Places Challenge + +On the host machine: + + # This script downloads multiple .tar files in parallel and unpacks them + # Places365-Challenge: Train(476GB) from High-resolution images (to train Big-Lama) + bash places_challenge_train_download.sh + + TODO: prepare + TODO: train + TODO: eval + +Docker: TODO + +## Create your data + +Please check bash scripts for data preparation and mask generation from CelebaHQ section, +if you stuck at one of the following steps. + + +On the host machine: + + # Make shure you are in lama folder + cd lama + export TORCH_HOME=$(pwd) && export PYTHONPATH=$(pwd) + + # You need to prepare following image folders: + $ ls my_dataset + train + val_source # 2000 or more images + visual_test_source # 100 or more images + eval_source # 2000 or more images + + # LaMa generates random masks for the train data on the flight, + # but needs fixed masks for test and visual_test for consistency of evaluation. + + # Suppose, we want to evaluate and pick best models + # on 512x512 val dataset with thick/thin/medium masks + # And your images have .jpg extention: + + python3 bin/gen_mask_dataset.py \ + $(pwd)/configs/data_gen/random__512.yaml \ # thick, thin, medium + my_dataset/val_source/ \ + my_dataset/val/random__512.yaml \# thick, thin, medium + --ext jpg + + # So the mask generator will: + # 1. resize and crop val images and save them as .png + # 2. generate masks + + ls my_dataset/val/random_medium_512/ + image1_crop000_mask000.png + image1_crop000.png + image2_crop000_mask000.png + image2_crop000.png + ... + + # Generate thick, thin, medium masks for visual_test folder: + + python3 bin/gen_mask_dataset.py \ + $(pwd)/configs/data_gen/random__512.yaml \ #thick, thin, medium + my_dataset/visual_test_source/ \ + my_dataset/visual_test/random__512/ \ #thick, thin, medium + --ext jpg + + + ls my_dataset/visual_test/random_thick_512/ + image1_crop000_mask000.png + image1_crop000.png + image2_crop000_mask000.png + image2_crop000.png + ... + + # Same process for eval_source image folder: + + python3 bin/gen_mask_dataset.py \ + $(pwd)/configs/data_gen/random__512.yaml \ #thick, thin, medium + my_dataset/eval_source/ \ + my_dataset/eval/random__512/ \ #thick, thin, medium + --ext jpg + + + + # Generate location config file which locate these folders: + + touch my_dataset.yaml + echo "data_root_dir: $(pwd)/my_dataset/" >> my_dataset.yaml + echo "out_root_dir: $(pwd)/experiments/" >> my_dataset.yaml + echo "tb_dir: $(pwd)/tb_logs/" >> my_dataset.yaml + mv my_dataset.yaml ${PWD}/configs/training/location/ + + + # Check data config for consistency with my_dataset folder structure: + $ cat ${PWD}/configs/training/data/abl-04-256-mh-dist + ... + train: + indir: ${location.data_root_dir}/train + ... + val: + indir: ${location.data_root_dir}/val + img_suffix: .png + visual_test: + indir: ${location.data_root_dir}/visual_test + img_suffix: .png + + + # Run training + python3 bin/train.py -cn lama-fourier location=my_dataset data.batch_size=10 + + # Evaluation: LaMa training procedure picks best few models according to + # scores on my_dataset/val/ + + # To evaluate one of your best models (i.e. 
at epoch=32) + # on previously unseen my_dataset/eval do the following + # for thin, thick and medium: + + # infer: + python3 bin/predict.py \ + model.path=$(pwd)/experiments/__lama-fourier_/ \ + indir=$(pwd)/my_dataset/eval/random__512/ \ + outdir=$(pwd)/inference/my_dataset/random__512 \ + model.checkpoint=epoch32.ckpt + + # metrics calculation: + python3 bin/evaluate_predicts.py \ + $(pwd)/configs/eval2_gpu.yaml \ + $(pwd)/my_dataset/eval/random__512/ \ + $(pwd)/inference/my_dataset/random__512 \ + $(pwd)/inference/my_dataset/random__512_metrics.csv + + +**OR** in the docker: + + TODO: train + TODO: eval + +# Hints + +### Generate different kinds of masks +The following command will execute a script that generates random masks. + + bash docker/1_generate_masks_from_raw_images.sh \ + configs/data_gen/random_medium_512.yaml \ + /directory_with_input_images \ + /directory_where_to_store_images_and_masks \ + --ext png + +The test data generation command stores images in the format, +which is suitable for [prediction](#prediction). + +The table below describes which configs we used to generate different test sets from the paper. +Note that we *do not fix a random seed*, so the results will be slightly different each time. + +| | Places 512x512 | CelebA 256x256 | +|--------|------------------------|------------------------| +| Narrow | random_thin_512.yaml | random_thin_256.yaml | +| Medium | random_medium_512.yaml | random_medium_256.yaml | +| Wide | random_thick_512.yaml | random_thick_256.yaml | + +Feel free to change the config path (argument #1) to any other config in `configs/data_gen` +or adjust config files themselves. + +### Override parameters in configs +Also you can override parameters in config like this: + + python3 bin/train.py -cn data.batch_size=10 run_title=my-title + +Where .yaml file extension is omitted + +### Models options +Config names for models from paper (substitude into the training command): + + * big-lama + * big-lama-regular + * lama-fourier + * lama-regular + * lama_small_train_masks + +Which are seated in configs/training/folder + +### Links +- All the data (models, test images, etc.) https://disk.yandex.ru/d/AmdeG-bIjmvSug +- Test images from the paper https://disk.yandex.ru/d/xKQJZeVRk5vLlQ +- The pre-trained models https://disk.yandex.ru/d/EgqaSnLohjuzAg +- The models for perceptual loss https://disk.yandex.ru/d/ncVmQlmT_kTemQ +- Our training logs are available at https://disk.yandex.ru/d/9Bt1wNSDS4jDkQ + + +### Training time & resources + +TODO + +## Acknowledgments + +* Segmentation code and models if form [CSAILVision](https://github.com/CSAILVision/semantic-segmentation-pytorch). +* LPIPS metric is from [richzhang](https://github.com/richzhang/PerceptualSimilarity) +* SSIM is from [Po-Hsun-Su](https://github.com/Po-Hsun-Su/pytorch-ssim) +* FID is from [mseitzer](https://github.com/mseitzer/pytorch-fid) + +## Citation +If you found this code helpful, please consider citing: +``` +@article{suvorov2021resolution, + title={Resolution-robust Large Mask Inpainting with Fourier Convolutions}, + author={Suvorov, Roman and Logacheva, Elizaveta and Mashikhin, Anton and Remizova, Anastasia and Ashukha, Arsenii and Silvestrov, Aleksei and Kong, Naejin and Goka, Harshith and Park, Kiwoong and Lempitsky, Victor}, + journal={arXiv preprint arXiv:2109.07161}, + year={2021} +} +``` + +
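+
+For a quick look at the metrics table that `bin/evaluate_predicts.py` writes, a short pandas sketch can help; this is illustrative only, assuming the tab-separated layout the script uses and reusing the example output path from the commands above:
+
+```
+# Illustrative only: load and preview the tab-separated metrics file
+# produced by bin/evaluate_predicts.py (path taken from the example above).
+import pandas as pd
+
+metrics = pd.read_csv("inference/random_thick_512_metrics.csv", sep="\t")
+print(metrics.head())               # rows/columns depend on the evaluator config (e.g. SSIM, LPIPS, FID)
+print(metrics.columns.tolist())     # inspect which metric columns were computed
+```
+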
+ +Copyright © 2021 +
diff --git a/lama/big-lama/config.yaml b/lama/big-lama/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..55fd91b5bcacd654e3045a2331e9c186818e6edc --- /dev/null +++ b/lama/big-lama/config.yaml @@ -0,0 +1,157 @@ +run_title: b18_ffc075_batch8x15 +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +optimizers: + generator: + kind: adam + lr: 0.001 + discriminator: + kind: adam + lr: 0.0001 +visualizer: + key_order: + - image + - predicted_image + - discr_output_fake + - discr_output_real + - inpainted + rescale_keys: + - discr_output_fake + - discr_output_real + kind: directory + outdir: /group-volume/User-Driven-Content-Generation/r.suvorov/inpainting/experiments/r.suvorov_2021-04-30_14-41-12_train_simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15/samples +location: + data_root_dir: /group-volume/User-Driven-Content-Generation/datasets/inpainting_data_root_large + out_root_dir: /group-volume/User-Driven-Content-Generation/${env:USER}/inpainting/experiments + tb_dir: /group-volume/User-Driven-Content-Generation/${env:USER}/inpainting/tb_logs +data: + batch_size: 15 + val_batch_size: 2 + num_workers: 3 + train: + indir: ${location.data_root_dir}/train + out_size: 256 + mask_gen_kwargs: + irregular_proba: 1 + irregular_kwargs: + max_angle: 4 + max_len: 200 + max_width: 100 + max_times: 5 + min_times: 1 + box_proba: 1 + box_kwargs: + margin: 10 + bbox_min_size: 30 + bbox_max_size: 150 + max_times: 3 + min_times: 1 + segm_proba: 0 + segm_kwargs: + confidence_threshold: 0.5 + max_object_area: 0.5 + min_mask_area: 0.07 + downsample_levels: 6 + num_variants_per_mask: 1 + rigidness_mode: 1 + max_foreground_coverage: 0.3 + max_foreground_intersection: 0.7 + max_mask_intersection: 0.1 + max_hidden_area: 0.1 + max_scale_change: 0.25 + horizontal_flip: true + max_vertical_shift: 0.2 + position_shuffle: true + transform_variant: distortions + dataloader_kwargs: + batch_size: ${data.batch_size} + shuffle: true + num_workers: ${data.num_workers} + val: + indir: ${location.data_root_dir}/val + img_suffix: .png + dataloader_kwargs: + batch_size: ${data.val_batch_size} + shuffle: false + num_workers: ${data.num_workers} + visual_test: + indir: ${location.data_root_dir}/korean_test + img_suffix: _input.png + pad_out_to_modulo: 32 + dataloader_kwargs: + batch_size: 1 + shuffle: false + num_workers: ${data.num_workers} +generator: + kind: ffc_resnet + input_nc: 4 + output_nc: 3 + ngf: 64 + n_downsampling: 3 + n_blocks: 18 + add_out_act: sigmoid + init_conv_kwargs: + ratio_gin: 0 + ratio_gout: 0 + enable_lfu: false + downsample_conv_kwargs: + ratio_gin: ${generator.init_conv_kwargs.ratio_gout} + ratio_gout: ${generator.downsample_conv_kwargs.ratio_gin} + enable_lfu: false + resnet_conv_kwargs: + ratio_gin: 0.75 + ratio_gout: ${generator.resnet_conv_kwargs.ratio_gin} + enable_lfu: false +discriminator: + kind: pix2pixhd_nlayer + input_nc: 3 + ndf: 64 + n_layers: 4 +evaluator: + kind: default + inpainted_key: inpainted + integral_kind: ssim_fid100_f1 +trainer: + kwargs: + gpus: -1 + accelerator: ddp + max_epochs: 200 + gradient_clip_val: 1 + log_gpu_memory: None + limit_train_batches: 25000 + val_check_interval: 
${trainer.kwargs.limit_train_batches} + log_every_n_steps: 1000 + precision: 32 + terminate_on_nan: false + check_val_every_n_epoch: 1 + num_sanity_val_steps: 8 + limit_val_batches: 1000 + replace_sampler_ddp: false + checkpoint_kwargs: + verbose: true + save_top_k: 5 + save_last: true + period: 1 + monitor: val_ssim_fid100_f1_total_mean + mode: max diff --git a/lama/big-lama/models/best.ckpt b/lama/big-lama/models/best.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..da3a1c3db995ee150d5b9e61ad621345778a49ab --- /dev/null +++ b/lama/big-lama/models/best.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fccb7adffd53ec0974ee5503c3731c2c2f1e7e07856fd9228cdcc0b46fd5d423 +size 410046389 diff --git a/lama/bin/analyze_errors.py b/lama/bin/analyze_errors.py new file mode 100755 index 0000000000000000000000000000000000000000..a11f9478de76ede162f5511449ac98e549ff4b6e --- /dev/null +++ b/lama/bin/analyze_errors.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python3 +import cv2 +import numpy as np +import sklearn +import torch +import os +import pickle +import pandas as pd +import matplotlib.pyplot as plt +from joblib import Parallel, delayed + +from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset, load_image +from saicinpainting.evaluation.losses.fid.inception import InceptionV3 +from saicinpainting.evaluation.utils import load_yaml +from saicinpainting.training.visualizers.base import visualize_mask_and_images + + +def draw_score(img, score): + img = np.transpose(img, (1, 2, 0)) + cv2.putText(img, f'{score:.2f}', + (40, 40), + cv2.FONT_HERSHEY_SIMPLEX, + 1, + (0, 1, 0), + thickness=3) + img = np.transpose(img, (2, 0, 1)) + return img + + +def save_global_samples(global_mask_fnames, mask2real_fname, mask2fake_fname, out_dir, real_scores_by_fname, fake_scores_by_fname): + for cur_mask_fname in global_mask_fnames: + cur_real_fname = mask2real_fname[cur_mask_fname] + orig_img = load_image(cur_real_fname, mode='RGB') + fake_img = load_image(mask2fake_fname[cur_mask_fname], mode='RGB')[:, :orig_img.shape[1], :orig_img.shape[2]] + mask = load_image(cur_mask_fname, mode='L')[None, ...] + + draw_score(orig_img, real_scores_by_fname.loc[cur_real_fname, 'real_score']) + draw_score(fake_img, fake_scores_by_fname.loc[cur_mask_fname, 'fake_score']) + + cur_grid = visualize_mask_and_images(dict(image=orig_img, mask=mask, fake=fake_img), + keys=['image', 'fake'], + last_without_mask=True) + cur_grid = np.clip(cur_grid * 255, 0, 255).astype('uint8') + cur_grid = cv2.cvtColor(cur_grid, cv2.COLOR_RGB2BGR) + cv2.imwrite(os.path.join(out_dir, os.path.splitext(os.path.basename(cur_mask_fname))[0] + '.jpg'), + cur_grid) + + +def save_samples_by_real(worst_best_by_real, mask2fake_fname, fake_info, out_dir): + for real_fname in worst_best_by_real.index: + worst_mask_path = worst_best_by_real.loc[real_fname, 'worst'] + best_mask_path = worst_best_by_real.loc[real_fname, 'best'] + orig_img = load_image(real_fname, mode='RGB') + worst_mask_img = load_image(worst_mask_path, mode='L')[None, ...] + worst_fake_img = load_image(mask2fake_fname[worst_mask_path], mode='RGB')[:, :orig_img.shape[1], :orig_img.shape[2]] + best_mask_img = load_image(best_mask_path, mode='L')[None, ...] 
+ best_fake_img = load_image(mask2fake_fname[best_mask_path], mode='RGB')[:, :orig_img.shape[1], :orig_img.shape[2]] + + draw_score(orig_img, worst_best_by_real.loc[real_fname, 'real_score']) + draw_score(worst_fake_img, worst_best_by_real.loc[real_fname, 'worst_score']) + draw_score(best_fake_img, worst_best_by_real.loc[real_fname, 'best_score']) + + cur_grid = visualize_mask_and_images(dict(image=orig_img, mask=np.zeros_like(worst_mask_img), + worst_mask=worst_mask_img, worst_img=worst_fake_img, + best_mask=best_mask_img, best_img=best_fake_img), + keys=['image', 'worst_mask', 'worst_img', 'best_mask', 'best_img'], + rescale_keys=['worst_mask', 'best_mask'], + last_without_mask=True) + cur_grid = np.clip(cur_grid * 255, 0, 255).astype('uint8') + cur_grid = cv2.cvtColor(cur_grid, cv2.COLOR_RGB2BGR) + cv2.imwrite(os.path.join(out_dir, + os.path.splitext(os.path.basename(real_fname))[0] + '.jpg'), + cur_grid) + + fig, (ax1, ax2) = plt.subplots(1, 2) + cur_stat = fake_info[fake_info['real_fname'] == real_fname] + cur_stat['fake_score'].hist(ax=ax1) + cur_stat['real_score'].hist(ax=ax2) + fig.tight_layout() + fig.savefig(os.path.join(out_dir, + os.path.splitext(os.path.basename(real_fname))[0] + '_scores.png')) + plt.close(fig) + + +def extract_overlapping_masks(mask_fnames, cur_i, fake_scores_table, max_overlaps_n=2): + result_pairs = [] + result_scores = [] + mask_fname_a = mask_fnames[cur_i] + mask_a = load_image(mask_fname_a, mode='L')[None, ...] > 0.5 + cur_score_a = fake_scores_table.loc[mask_fname_a, 'fake_score'] + for mask_fname_b in mask_fnames[cur_i + 1:]: + mask_b = load_image(mask_fname_b, mode='L')[None, ...] > 0.5 + if not np.any(mask_a & mask_b): + continue + cur_score_b = fake_scores_table.loc[mask_fname_b, 'fake_score'] + result_pairs.append((mask_fname_a, mask_fname_b)) + result_scores.append(cur_score_b - cur_score_a) + if len(result_pairs) >= max_overlaps_n: + break + return result_pairs, result_scores + + +def main(args): + config = load_yaml(args.config) + + latents_dir = os.path.join(args.outpath, 'latents') + os.makedirs(latents_dir, exist_ok=True) + global_worst_dir = os.path.join(args.outpath, 'global_worst') + os.makedirs(global_worst_dir, exist_ok=True) + global_best_dir = os.path.join(args.outpath, 'global_best') + os.makedirs(global_best_dir, exist_ok=True) + worst_best_by_best_worst_score_diff_max_dir = os.path.join(args.outpath, 'worst_best_by_real', 'best_worst_score_diff_max') + os.makedirs(worst_best_by_best_worst_score_diff_max_dir, exist_ok=True) + worst_best_by_best_worst_score_diff_min_dir = os.path.join(args.outpath, 'worst_best_by_real', 'best_worst_score_diff_min') + os.makedirs(worst_best_by_best_worst_score_diff_min_dir, exist_ok=True) + worst_best_by_real_best_score_diff_max_dir = os.path.join(args.outpath, 'worst_best_by_real', 'real_best_score_diff_max') + os.makedirs(worst_best_by_real_best_score_diff_max_dir, exist_ok=True) + worst_best_by_real_best_score_diff_min_dir = os.path.join(args.outpath, 'worst_best_by_real', 'real_best_score_diff_min') + os.makedirs(worst_best_by_real_best_score_diff_min_dir, exist_ok=True) + worst_best_by_real_worst_score_diff_max_dir = os.path.join(args.outpath, 'worst_best_by_real', 'real_worst_score_diff_max') + os.makedirs(worst_best_by_real_worst_score_diff_max_dir, exist_ok=True) + worst_best_by_real_worst_score_diff_min_dir = os.path.join(args.outpath, 'worst_best_by_real', 'real_worst_score_diff_min') + os.makedirs(worst_best_by_real_worst_score_diff_min_dir, exist_ok=True) + + if not args.only_report: + 
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048] + inception_model = InceptionV3([block_idx]).eval().cuda() + + dataset = PrecomputedInpaintingResultsDataset(args.datadir, args.predictdir, **config.dataset_kwargs) + + real2vector_cache = {} + + real_features = [] + fake_features = [] + + orig_fnames = [] + mask_fnames = [] + mask2real_fname = {} + mask2fake_fname = {} + + for batch_i, batch in enumerate(dataset): + orig_img_fname = dataset.img_filenames[batch_i] + mask_fname = dataset.mask_filenames[batch_i] + fake_fname = dataset.pred_filenames[batch_i] + mask2real_fname[mask_fname] = orig_img_fname + mask2fake_fname[mask_fname] = fake_fname + + cur_real_vector = real2vector_cache.get(orig_img_fname, None) + if cur_real_vector is None: + with torch.no_grad(): + in_img = torch.from_numpy(batch['image'][None, ...]).cuda() + cur_real_vector = inception_model(in_img)[0].squeeze(-1).squeeze(-1).cpu().numpy() + real2vector_cache[orig_img_fname] = cur_real_vector + + pred_img = torch.from_numpy(batch['inpainted'][None, ...]).cuda() + cur_fake_vector = inception_model(pred_img)[0].squeeze(-1).squeeze(-1).cpu().numpy() + + real_features.append(cur_real_vector) + fake_features.append(cur_fake_vector) + + orig_fnames.append(orig_img_fname) + mask_fnames.append(mask_fname) + + ids_features = np.concatenate(real_features + fake_features, axis=0) + ids_labels = np.array(([1] * len(real_features)) + ([0] * len(fake_features))) + + with open(os.path.join(latents_dir, 'featues.pkl'), 'wb') as f: + pickle.dump(ids_features, f, protocol=3) + with open(os.path.join(latents_dir, 'labels.pkl'), 'wb') as f: + pickle.dump(ids_labels, f, protocol=3) + with open(os.path.join(latents_dir, 'orig_fnames.pkl'), 'wb') as f: + pickle.dump(orig_fnames, f, protocol=3) + with open(os.path.join(latents_dir, 'mask_fnames.pkl'), 'wb') as f: + pickle.dump(mask_fnames, f, protocol=3) + with open(os.path.join(latents_dir, 'mask2real_fname.pkl'), 'wb') as f: + pickle.dump(mask2real_fname, f, protocol=3) + with open(os.path.join(latents_dir, 'mask2fake_fname.pkl'), 'wb') as f: + pickle.dump(mask2fake_fname, f, protocol=3) + + svm = sklearn.svm.LinearSVC(dual=False) + svm.fit(ids_features, ids_labels) + + pred_scores = svm.decision_function(ids_features) + real_scores = pred_scores[:len(real_features)] + fake_scores = pred_scores[len(real_features):] + + with open(os.path.join(latents_dir, 'pred_scores.pkl'), 'wb') as f: + pickle.dump(pred_scores, f, protocol=3) + with open(os.path.join(latents_dir, 'real_scores.pkl'), 'wb') as f: + pickle.dump(real_scores, f, protocol=3) + with open(os.path.join(latents_dir, 'fake_scores.pkl'), 'wb') as f: + pickle.dump(fake_scores, f, protocol=3) + else: + with open(os.path.join(latents_dir, 'orig_fnames.pkl'), 'rb') as f: + orig_fnames = pickle.load(f) + with open(os.path.join(latents_dir, 'mask_fnames.pkl'), 'rb') as f: + mask_fnames = pickle.load(f) + with open(os.path.join(latents_dir, 'mask2real_fname.pkl'), 'rb') as f: + mask2real_fname = pickle.load(f) + with open(os.path.join(latents_dir, 'mask2fake_fname.pkl'), 'rb') as f: + mask2fake_fname = pickle.load(f) + with open(os.path.join(latents_dir, 'real_scores.pkl'), 'rb') as f: + real_scores = pickle.load(f) + with open(os.path.join(latents_dir, 'fake_scores.pkl'), 'rb') as f: + fake_scores = pickle.load(f) + + real_info = pd.DataFrame(data=[dict(real_fname=fname, + real_score=score) + for fname, score + in zip(orig_fnames, real_scores)]) + real_info.set_index('real_fname', drop=True, inplace=True) + + fake_info = 
pd.DataFrame(data=[dict(mask_fname=fname, + fake_fname=mask2fake_fname[fname], + real_fname=mask2real_fname[fname], + fake_score=score) + for fname, score + in zip(mask_fnames, fake_scores)]) + fake_info = fake_info.join(real_info, on='real_fname', how='left') + fake_info.drop_duplicates(['fake_fname', 'real_fname'], inplace=True) + + fake_stats_by_real = fake_info.groupby('real_fname')['fake_score'].describe()[['mean', 'std']].rename( + {'mean': 'mean_fake_by_real', 'std': 'std_fake_by_real'}, axis=1) + fake_info = fake_info.join(fake_stats_by_real, on='real_fname', rsuffix='stat_by_real') + fake_info.drop_duplicates(['fake_fname', 'real_fname'], inplace=True) + fake_info.to_csv(os.path.join(latents_dir, 'join_scores_table.csv'), sep='\t', index=False) + + fake_scores_table = fake_info.set_index('mask_fname')['fake_score'].to_frame() + real_scores_table = fake_info.set_index('real_fname')['real_score'].drop_duplicates().to_frame() + + fig, (ax1, ax2) = plt.subplots(1, 2) + ax1.hist(fake_scores) + ax2.hist(real_scores) + fig.tight_layout() + fig.savefig(os.path.join(args.outpath, 'global_scores_hist.png')) + plt.close(fig) + + global_worst_masks = fake_info.sort_values('fake_score', ascending=True)['mask_fname'].iloc[:config.take_global_top].to_list() + global_best_masks = fake_info.sort_values('fake_score', ascending=False)['mask_fname'].iloc[:config.take_global_top].to_list() + save_global_samples(global_worst_masks, mask2real_fname, mask2fake_fname, global_worst_dir, real_scores_table, fake_scores_table) + save_global_samples(global_best_masks, mask2real_fname, mask2fake_fname, global_best_dir, real_scores_table, fake_scores_table) + + # grouped by real + worst_samples_by_real = fake_info.groupby('real_fname').apply( + lambda d: d.set_index('mask_fname')['fake_score'].idxmin()).to_frame().rename({0: 'worst'}, axis=1) + best_samples_by_real = fake_info.groupby('real_fname').apply( + lambda d: d.set_index('mask_fname')['fake_score'].idxmax()).to_frame().rename({0: 'best'}, axis=1) + worst_best_by_real = pd.concat([worst_samples_by_real, best_samples_by_real], axis=1) + + worst_best_by_real = worst_best_by_real.join(fake_scores_table.rename({'fake_score': 'worst_score'}, axis=1), + on='worst') + worst_best_by_real = worst_best_by_real.join(fake_scores_table.rename({'fake_score': 'best_score'}, axis=1), + on='best') + worst_best_by_real = worst_best_by_real.join(real_scores_table) + + worst_best_by_real['best_worst_score_diff'] = worst_best_by_real['best_score'] - worst_best_by_real['worst_score'] + worst_best_by_real['real_best_score_diff'] = worst_best_by_real['real_score'] - worst_best_by_real['best_score'] + worst_best_by_real['real_worst_score_diff'] = worst_best_by_real['real_score'] - worst_best_by_real['worst_score'] + + worst_best_by_best_worst_score_diff_min = worst_best_by_real.sort_values('best_worst_score_diff', ascending=True).iloc[:config.take_worst_best_top] + worst_best_by_best_worst_score_diff_max = worst_best_by_real.sort_values('best_worst_score_diff', ascending=False).iloc[:config.take_worst_best_top] + save_samples_by_real(worst_best_by_best_worst_score_diff_min, mask2fake_fname, fake_info, worst_best_by_best_worst_score_diff_min_dir) + save_samples_by_real(worst_best_by_best_worst_score_diff_max, mask2fake_fname, fake_info, worst_best_by_best_worst_score_diff_max_dir) + + worst_best_by_real_best_score_diff_min = worst_best_by_real.sort_values('real_best_score_diff', ascending=True).iloc[:config.take_worst_best_top] + worst_best_by_real_best_score_diff_max = 
worst_best_by_real.sort_values('real_best_score_diff', ascending=False).iloc[:config.take_worst_best_top] + save_samples_by_real(worst_best_by_real_best_score_diff_min, mask2fake_fname, fake_info, worst_best_by_real_best_score_diff_min_dir) + save_samples_by_real(worst_best_by_real_best_score_diff_max, mask2fake_fname, fake_info, worst_best_by_real_best_score_diff_max_dir) + + worst_best_by_real_worst_score_diff_min = worst_best_by_real.sort_values('real_worst_score_diff', ascending=True).iloc[:config.take_worst_best_top] + worst_best_by_real_worst_score_diff_max = worst_best_by_real.sort_values('real_worst_score_diff', ascending=False).iloc[:config.take_worst_best_top] + save_samples_by_real(worst_best_by_real_worst_score_diff_min, mask2fake_fname, fake_info, worst_best_by_real_worst_score_diff_min_dir) + save_samples_by_real(worst_best_by_real_worst_score_diff_max, mask2fake_fname, fake_info, worst_best_by_real_worst_score_diff_max_dir) + + # analyze what change of mask causes bigger change of score + overlapping_mask_fname_pairs = [] + overlapping_mask_fname_score_diffs = [] + for cur_real_fname in orig_fnames: + cur_fakes_info = fake_info[fake_info['real_fname'] == cur_real_fname] + cur_mask_fnames = sorted(cur_fakes_info['mask_fname'].unique()) + + cur_mask_pairs_and_scores = Parallel(args.n_jobs)( + delayed(extract_overlapping_masks)(cur_mask_fnames, i, fake_scores_table) + for i in range(len(cur_mask_fnames) - 1) + ) + for cur_pairs, cur_scores in cur_mask_pairs_and_scores: + overlapping_mask_fname_pairs.extend(cur_pairs) + overlapping_mask_fname_score_diffs.extend(cur_scores) + + overlapping_mask_fname_pairs = np.asarray(overlapping_mask_fname_pairs) + overlapping_mask_fname_score_diffs = np.asarray(overlapping_mask_fname_score_diffs) + overlapping_sort_idx = np.argsort(overlapping_mask_fname_score_diffs) + overlapping_mask_fname_pairs = overlapping_mask_fname_pairs[overlapping_sort_idx] + overlapping_mask_fname_score_diffs = overlapping_mask_fname_score_diffs[overlapping_sort_idx] + + + + + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('config', type=str, help='Path to config for dataset generation') + aparser.add_argument('datadir', type=str, + help='Path to folder with images and masks (output of gen_mask_dataset.py)') + aparser.add_argument('predictdir', type=str, + help='Path to folder with predicts (e.g. 
predict_hifill_baseline.py)') + aparser.add_argument('outpath', type=str, help='Where to put results') + aparser.add_argument('--only-report', action='store_true', + help='Whether to skip prediction and feature extraction, ' + 'load all the possible latents and proceed with report only') + aparser.add_argument('--n-jobs', type=int, default=8, help='how many processes to use for pair mask mining') + + main(aparser.parse_args()) diff --git a/lama/bin/blur_predicts.py b/lama/bin/blur_predicts.py new file mode 100755 index 0000000000000000000000000000000000000000..a14fcc28d5a906ad3a21ab4ba482f38b4fc411cb --- /dev/null +++ b/lama/bin/blur_predicts.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 + +import os + +import cv2 +import numpy as np +import tqdm + +from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset +from saicinpainting.evaluation.utils import load_yaml + + +def main(args): + config = load_yaml(args.config) + + if not args.predictdir.endswith('/'): + args.predictdir += '/' + + dataset = PrecomputedInpaintingResultsDataset(args.datadir, args.predictdir, **config.dataset_kwargs) + + os.makedirs(os.path.dirname(args.outpath), exist_ok=True) + + for img_i in tqdm.trange(len(dataset)): + pred_fname = dataset.pred_filenames[img_i] + cur_out_fname = os.path.join(args.outpath, pred_fname[len(args.predictdir):]) + os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True) + + sample = dataset[img_i] + img = sample['image'] + mask = sample['mask'] + inpainted = sample['inpainted'] + + inpainted_blurred = cv2.GaussianBlur(np.transpose(inpainted, (1, 2, 0)), + ksize=(args.k, args.k), + sigmaX=args.s, sigmaY=args.s, + borderType=cv2.BORDER_REFLECT) + + cur_res = (1 - mask) * np.transpose(img, (1, 2, 0)) + mask * inpainted_blurred + cur_res = np.clip(cur_res * 255, 0, 255).astype('uint8') + cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR) + cv2.imwrite(cur_out_fname, cur_res) + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('config', type=str, help='Path to evaluation config') + aparser.add_argument('datadir', type=str, + help='Path to folder with images and masks (output of gen_mask_dataset.py)') + aparser.add_argument('predictdir', type=str, + help='Path to folder with predicts (e.g. 
predict_hifill_baseline.py)') + aparser.add_argument('outpath', type=str, help='Where to put results') + aparser.add_argument('-s', type=float, default=0.1, help='Gaussian blur sigma') + aparser.add_argument('-k', type=int, default=5, help='Kernel size in gaussian blur') + + main(aparser.parse_args()) diff --git a/lama/bin/calc_dataset_stats.py b/lama/bin/calc_dataset_stats.py new file mode 100755 index 0000000000000000000000000000000000000000..5086fea1bab691892f2e52e3c59e5ef048bcfac0 --- /dev/null +++ b/lama/bin/calc_dataset_stats.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 + +import os + +import numpy as np +import tqdm +from scipy.ndimage.morphology import distance_transform_edt + +from saicinpainting.evaluation.data import InpaintingDataset +from saicinpainting.evaluation.vis import save_item_for_vis + + +def main(args): + dataset = InpaintingDataset(args.datadir, img_suffix='.png') + + area_bins = np.linspace(0, 1, args.area_bins + 1) + + heights = [] + widths = [] + image_areas = [] + hole_areas = [] + hole_area_percents = [] + known_pixel_distances = [] + + area_bins_count = np.zeros(args.area_bins) + area_bin_titles = [f'{area_bins[i] * 100:.0f}-{area_bins[i + 1] * 100:.0f}' for i in range(args.area_bins)] + + bin2i = [[] for _ in range(args.area_bins)] + + for i, item in enumerate(tqdm.tqdm(dataset)): + h, w = item['image'].shape[1:] + heights.append(h) + widths.append(w) + full_area = h * w + image_areas.append(full_area) + bin_mask = item['mask'] > 0.5 + hole_area = bin_mask.sum() + hole_areas.append(hole_area) + hole_percent = hole_area / full_area + hole_area_percents.append(hole_percent) + bin_i = np.clip(np.searchsorted(area_bins, hole_percent) - 1, 0, len(area_bins_count) - 1) + area_bins_count[bin_i] += 1 + bin2i[bin_i].append(i) + + cur_dist = distance_transform_edt(bin_mask) + cur_dist_inside_mask = cur_dist[bin_mask] + known_pixel_distances.append(cur_dist_inside_mask.mean()) + + os.makedirs(args.outdir, exist_ok=True) + with open(os.path.join(args.outdir, 'summary.txt'), 'w') as f: + f.write(f'''Location: {args.datadir} + +Number of samples: {len(dataset)} + +Image height: min {min(heights):5d} max {max(heights):5d} mean {np.mean(heights):.2f} +Image width: min {min(widths):5d} max {max(widths):5d} mean {np.mean(widths):.2f} +Image area: min {min(image_areas):7d} max {max(image_areas):7d} mean {np.mean(image_areas):.2f} +Hole area: min {min(hole_areas):7d} max {max(hole_areas):7d} mean {np.mean(hole_areas):.2f} +Hole area %: min {min(hole_area_percents) * 100:2.2f} max {max(hole_area_percents) * 100:2.2f} mean {np.mean(hole_area_percents) * 100:2.2f} +Dist 2known: min {min(known_pixel_distances):2.2f} max {max(known_pixel_distances):2.2f} mean {np.mean(known_pixel_distances):2.2f} median {np.median(known_pixel_distances):2.2f} + +Stats by hole area %: +''') + for bin_i in range(args.area_bins): + f.write(f'{area_bin_titles[bin_i]}%: ' + f'samples number {area_bins_count[bin_i]}, ' + f'{area_bins_count[bin_i] / len(dataset) * 100:.1f}%\n') + + for bin_i in range(args.area_bins): + bindir = os.path.join(args.outdir, 'samples', area_bin_titles[bin_i]) + os.makedirs(bindir, exist_ok=True) + bin_idx = bin2i[bin_i] + for sample_i in np.random.choice(bin_idx, size=min(len(bin_idx), args.samples_n), replace=False): + save_item_for_vis(dataset[sample_i], os.path.join(bindir, f'{sample_i}.png')) + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('datadir', type=str, + help='Path to folder with images and masks (output 
of gen_mask_dataset.py)') + aparser.add_argument('outdir', type=str, help='Where to put results') + aparser.add_argument('--samples-n', type=int, default=10, + help='Number of sample images with masks to copy for visualization for each area bin') + aparser.add_argument('--area-bins', type=int, default=10, help='How many area bins to have') + + main(aparser.parse_args()) diff --git a/lama/bin/debug/analyze_overlapping_masks.sh b/lama/bin/debug/analyze_overlapping_masks.sh new file mode 100755 index 0000000000000000000000000000000000000000..4a4727b0129007d9b0eed3fc25780adb565965a2 --- /dev/null +++ b/lama/bin/debug/analyze_overlapping_masks.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +BASEDIR="$(dirname $0)" + +# paths are valid for mml7 + +# select images +#ls /data/inpainting/work/data/train | shuf | head -2000 | xargs -n1 -I{} cp {} /data/inpainting/mask_analysis/src + +# generate masks +#"$BASEDIR/../gen_debug_mask_dataset.py" \ +# "$BASEDIR/../../configs/debug_mask_gen.yaml" \ +# "/data/inpainting/mask_analysis/src" \ +# "/data/inpainting/mask_analysis/generated" + +# predict +#"$BASEDIR/../predict.py" \ +# model.path="simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15/saved_checkpoint/r.suvorov_2021-04-30_14-41-12_train_simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15_epoch22-step-574999" \ +# indir="/data/inpainting/mask_analysis/generated" \ +# outdir="/data/inpainting/mask_analysis/predicted" \ +# dataset.img_suffix=.jpg \ +# +out_ext=.jpg + +# analyze good and bad samples +"$BASEDIR/../analyze_errors.py" \ + --only-report \ + --n-jobs 8 \ + "$BASEDIR/../../configs/analyze_mask_errors.yaml" \ + "/data/inpainting/mask_analysis/small/generated" \ + "/data/inpainting/mask_analysis/small/predicted" \ + "/data/inpainting/mask_analysis/small/report" diff --git a/lama/bin/evaluate_predicts.py b/lama/bin/evaluate_predicts.py new file mode 100755 index 0000000000000000000000000000000000000000..a4c182a50bc0cc3e2e03c713c2c0be2a804b04b8 --- /dev/null +++ b/lama/bin/evaluate_predicts.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 + +import os + +import pandas as pd + +from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset +from saicinpainting.evaluation.evaluator import InpaintingEvaluator, lpips_fid100_f1 +from saicinpainting.evaluation.losses.base_loss import SegmentationAwareSSIM, \ + SegmentationClassStats, SSIMScore, LPIPSScore, FIDScore, SegmentationAwareLPIPS, SegmentationAwareFID +from saicinpainting.evaluation.utils import load_yaml + + +def main(args): + config = load_yaml(args.config) + + dataset = PrecomputedInpaintingResultsDataset(args.datadir, args.predictdir, **config.dataset_kwargs) + + metrics = { + 'ssim': SSIMScore(), + 'lpips': LPIPSScore(), + 'fid': FIDScore() + } + enable_segm = config.get('segmentation', dict(enable=False)).get('enable', False) + if enable_segm: + weights_path = os.path.expandvars(config.segmentation.weights_path) + metrics.update(dict( + segm_stats=SegmentationClassStats(weights_path=weights_path), + segm_ssim=SegmentationAwareSSIM(weights_path=weights_path), + segm_lpips=SegmentationAwareLPIPS(weights_path=weights_path), + segm_fid=SegmentationAwareFID(weights_path=weights_path) + )) + evaluator = InpaintingEvaluator(dataset, scores=metrics, + integral_title='lpips_fid100_f1', integral_func=lpips_fid100_f1, + **config.evaluator_kwargs) + + os.makedirs(os.path.dirname(args.outpath), exist_ok=True) + + results = evaluator.evaluate() + + results = pd.DataFrame(results).stack(1).unstack(0) + results.dropna(axis=1, how='all', 
inplace=True) + results.to_csv(args.outpath, sep='\t', float_format='%.4f') + + if enable_segm: + only_short_results = results[[c for c in results.columns if not c[0].startswith('segm_')]].dropna(axis=1, how='all') + only_short_results.to_csv(args.outpath + '_short', sep='\t', float_format='%.4f') + + print(only_short_results) + + segm_metrics_results = results[['segm_ssim', 'segm_lpips', 'segm_fid']].dropna(axis=1, how='all').transpose().unstack(0).reorder_levels([1, 0], axis=1) + segm_metrics_results.drop(['mean', 'std'], axis=0, inplace=True) + + segm_stats_results = results['segm_stats'].dropna(axis=1, how='all').transpose() + segm_stats_results.index = pd.MultiIndex.from_tuples(n.split('/') for n in segm_stats_results.index) + segm_stats_results = segm_stats_results.unstack(0).reorder_levels([1, 0], axis=1) + segm_stats_results.sort_index(axis=1, inplace=True) + segm_stats_results.dropna(axis=0, how='all', inplace=True) + + segm_results = pd.concat([segm_metrics_results, segm_stats_results], axis=1, sort=True) + segm_results.sort_values(('mask_freq', 'total'), ascending=False, inplace=True) + + segm_results.to_csv(args.outpath + '_segm', sep='\t', float_format='%.4f') + else: + print(results) + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('config', type=str, help='Path to evaluation config') + aparser.add_argument('datadir', type=str, + help='Path to folder with images and masks (output of gen_mask_dataset.py)') + aparser.add_argument('predictdir', type=str, + help='Path to folder with predicts (e.g. predict_hifill_baseline.py)') + aparser.add_argument('outpath', type=str, help='Where to put results') + + main(aparser.parse_args()) diff --git a/lama/bin/evaluator_example.py b/lama/bin/evaluator_example.py new file mode 100644 index 0000000000000000000000000000000000000000..669e3c53c1218444a880dc78f19a565a406ff6dc --- /dev/null +++ b/lama/bin/evaluator_example.py @@ -0,0 +1,76 @@ +import os + +import cv2 +import numpy as np +import torch +from skimage import io +from skimage.transform import resize +from torch.utils.data import Dataset + +from saicinpainting.evaluation.evaluator import InpaintingEvaluator +from saicinpainting.evaluation.losses.base_loss import SSIMScore, LPIPSScore, FIDScore + + +class SimpleImageDataset(Dataset): + def __init__(self, root_dir, image_size=(400, 600)): + self.root_dir = root_dir + self.files = sorted(os.listdir(root_dir)) + self.image_size = image_size + + def __getitem__(self, index): + img_name = os.path.join(self.root_dir, self.files[index]) + image = io.imread(img_name) + image = resize(image, self.image_size, anti_aliasing=True) + image = torch.FloatTensor(image).permute(2, 0, 1) + return image + + def __len__(self): + return len(self.files) + + +def create_rectangle_mask(height, width): + mask = np.ones((height, width)) + up_left_corner = width // 4, height // 4 + down_right_corner = (width - up_left_corner[0] - 1, height - up_left_corner[1] - 1) + cv2.rectangle(mask, up_left_corner, down_right_corner, (0, 0, 0), thickness=cv2.FILLED) + return mask + + +class Model(): + def __call__(self, img_batch, mask_batch): + mean = (img_batch * mask_batch[:, None, :, :]).sum(dim=(2, 3)) / mask_batch.sum(dim=(1, 2))[:, None] + inpainted = mean[:, :, None, None] * (1 - mask_batch[:, None, :, :]) + img_batch * mask_batch[:, None, :, :] + return inpainted + + +class SimpleImageSquareMaskDataset(Dataset): + def __init__(self, dataset): + self.dataset = dataset + self.mask = 
torch.FloatTensor(create_rectangle_mask(*self.dataset.image_size)) + self.model = Model() + + def __getitem__(self, index): + img = self.dataset[index] + mask = self.mask.clone() + inpainted = self.model(img[None, ...], mask[None, ...]) + return dict(image=img, mask=mask, inpainted=inpainted) + + def __len__(self): + return len(self.dataset) + + +dataset = SimpleImageDataset('imgs') +mask_dataset = SimpleImageSquareMaskDataset(dataset) +model = Model() +metrics = { + 'ssim': SSIMScore(), + 'lpips': LPIPSScore(), + 'fid': FIDScore() +} + +evaluator = InpaintingEvaluator( + mask_dataset, scores=metrics, batch_size=3, area_grouping=True +) + +results = evaluator.evaluate(model) +print(results) diff --git a/lama/bin/extract_masks.py b/lama/bin/extract_masks.py new file mode 100755 index 0000000000000000000000000000000000000000..d114e0fe470595f1d2aaeeeb84b36352f65b121e --- /dev/null +++ b/lama/bin/extract_masks.py @@ -0,0 +1,63 @@ +import PIL.Image as Image +import numpy as np +import os + + +def main(args): + if not args.indir.endswith('/'): + args.indir += '/' + os.makedirs(args.outdir, exist_ok=True) + + src_images = [ + args.indir+fname for fname in os.listdir(args.indir)] + + tgt_masks = [ + args.outdir+fname[:-4] + f'_mask000.png' + for fname in os.listdir(args.indir)] + + for img_name, msk_name in zip(src_images, tgt_masks): + #print(img) + #print(msk) + + image = Image.open(img_name).convert('RGB') + image = np.transpose(np.array(image), (2, 0, 1)) + + mask = (image == 255).astype(int) + + print(mask.dtype, mask.shape) + + + Image.fromarray( + np.clip(mask[0,:,:] * 255, 0, 255).astype('uint8'),mode='L' + ).save(msk_name) + + + + + ''' + for infile in src_images: + try: + file_relpath = infile[len(indir):] + img_outpath = os.path.join(outdir, file_relpath) + os.makedirs(os.path.dirname(img_outpath), exist_ok=True) + + image = Image.open(infile).convert('RGB') + + mask = + + Image.fromarray( + np.clip( + cur_mask * 255, 0, 255).astype('uint8'), + mode='L' + ).save(cur_basename + f'_mask{i:03d}.png') + ''' + + + +if __name__ == '__main__': + import argparse + aparser = argparse.ArgumentParser() + aparser.add_argument('--indir', type=str, help='Path to folder with images') + aparser.add_argument('--outdir', type=str, help='Path to folder to store aligned images and masks to') + + main(aparser.parse_args()) diff --git a/lama/bin/filter_sharded_dataset.py b/lama/bin/filter_sharded_dataset.py new file mode 100755 index 0000000000000000000000000000000000000000..b3c2b490e88bb3b55c6bb717e08f97f7a396d5fa --- /dev/null +++ b/lama/bin/filter_sharded_dataset.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 + + +import math +import os +import random + +import braceexpand +import webdataset as wds + +DEFAULT_CATS_FILE = os.path.join(os.path.dirname(__file__), '..', 'configs', 'places2-categories_157.txt') + +def is_good_key(key, cats): + return any(c in key for c in cats) + + +def main(args): + if args.categories == 'nofilter': + good_categories = None + else: + with open(args.categories, 'r') as f: + good_categories = set(line.strip().split(' ')[0] for line in f if line.strip()) + + all_input_files = list(braceexpand.braceexpand(args.infile)) + chunk_size = int(math.ceil(len(all_input_files) / args.n_read_streams)) + + input_iterators = [iter(wds.Dataset(all_input_files[start : start + chunk_size]).shuffle(args.shuffle_buffer)) + for start in range(0, len(all_input_files), chunk_size)] + output_datasets = [wds.ShardWriter(args.outpattern.format(i)) for i in range(args.n_write_streams)] + + good_readers = 
list(range(len(input_iterators))) + step_i = 0 + good_samples = 0 + bad_samples = 0 + while len(good_readers) > 0: + if step_i % args.print_freq == 0: + print(f'Iterations done {step_i}; readers alive {good_readers}; good samples {good_samples}; bad samples {bad_samples}') + + step_i += 1 + + ri = random.choice(good_readers) + try: + sample = next(input_iterators[ri]) + except StopIteration: + good_readers = list(set(good_readers) - {ri}) + continue + + if good_categories is not None and not is_good_key(sample['__key__'], good_categories): + bad_samples += 1 + continue + + wi = random.randint(0, args.n_write_streams - 1) + output_datasets[wi].write(sample) + good_samples += 1 + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('--categories', type=str, default=DEFAULT_CATS_FILE) + aparser.add_argument('--shuffle-buffer', type=int, default=10000) + aparser.add_argument('--n-read-streams', type=int, default=10) + aparser.add_argument('--n-write-streams', type=int, default=10) + aparser.add_argument('--print-freq', type=int, default=1000) + aparser.add_argument('infile', type=str) + aparser.add_argument('outpattern', type=str) + + main(aparser.parse_args()) diff --git a/lama/bin/gen_debug_mask_dataset.py b/lama/bin/gen_debug_mask_dataset.py new file mode 100755 index 0000000000000000000000000000000000000000..738f76875c82aa412063bb5bff15e69c46f20362 --- /dev/null +++ b/lama/bin/gen_debug_mask_dataset.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 + +import glob +import os + +import PIL.Image as Image +import cv2 +import numpy as np +import tqdm +import shutil + + +from saicinpainting.evaluation.utils import load_yaml + + +def generate_masks_for_img(infile, outmask_pattern, mask_size=200, step=0.5): + inimg = Image.open(infile) + width, height = inimg.size + step_abs = int(mask_size * step) + + mask = np.zeros((height, width), dtype='uint8') + mask_i = 0 + + for start_vertical in range(0, height - step_abs, step_abs): + for start_horizontal in range(0, width - step_abs, step_abs): + mask[start_vertical:start_vertical + mask_size, start_horizontal:start_horizontal + mask_size] = 255 + + cv2.imwrite(outmask_pattern.format(mask_i), mask) + + mask[start_vertical:start_vertical + mask_size, start_horizontal:start_horizontal + mask_size] = 0 + mask_i += 1 + + +def main(args): + if not args.indir.endswith('/'): + args.indir += '/' + if not args.outdir.endswith('/'): + args.outdir += '/' + + config = load_yaml(args.config) + + in_files = list(glob.glob(os.path.join(args.indir, '**', f'*{config.img_ext}'), recursive=True)) + for infile in tqdm.tqdm(in_files): + outimg = args.outdir + infile[len(args.indir):] + outmask_pattern = outimg[:-len(config.img_ext)] + '_mask{:04d}.png' + + os.makedirs(os.path.dirname(outimg), exist_ok=True) + shutil.copy2(infile, outimg) + + generate_masks_for_img(infile, outmask_pattern, **config.gen_kwargs) + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('config', type=str, help='Path to config for dataset generation') + aparser.add_argument('indir', type=str, help='Path to folder with images') + aparser.add_argument('outdir', type=str, help='Path to folder to store aligned images and masks to') + + main(aparser.parse_args()) diff --git a/lama/bin/gen_mask_dataset.py b/lama/bin/gen_mask_dataset.py new file mode 100755 index 0000000000000000000000000000000000000000..6e2ce3a9bc9708fd46641cab815113508af32d02 --- /dev/null +++ b/lama/bin/gen_mask_dataset.py @@ 
-0,0 +1,130 @@ +#!/usr/bin/env python3 + +import glob +import os +import shutil +import traceback + +import PIL.Image as Image +import numpy as np +from joblib import Parallel, delayed + +from saicinpainting.evaluation.masks.mask import SegmentationMask, propose_random_square_crop +from saicinpainting.evaluation.utils import load_yaml, SmallMode +from saicinpainting.training.data.masks import MixedMaskGenerator + + +class MakeManyMasksWrapper: + def __init__(self, impl, variants_n=2): + self.impl = impl + self.variants_n = variants_n + + def get_masks(self, img): + img = np.transpose(np.array(img), (2, 0, 1)) + return [self.impl(img)[0] for _ in range(self.variants_n)] + + +def process_images(src_images, indir, outdir, config): + if config.generator_kind == 'segmentation': + mask_generator = SegmentationMask(**config.mask_generator_kwargs) + elif config.generator_kind == 'random': + variants_n = config.mask_generator_kwargs.pop('variants_n', 2) + mask_generator = MakeManyMasksWrapper(MixedMaskGenerator(**config.mask_generator_kwargs), + variants_n=variants_n) + else: + raise ValueError(f'Unexpected generator kind: {config.generator_kind}') + + max_tamper_area = config.get('max_tamper_area', 1) + + for infile in src_images: + try: + file_relpath = infile[len(indir):] + img_outpath = os.path.join(outdir, file_relpath) + os.makedirs(os.path.dirname(img_outpath), exist_ok=True) + + image = Image.open(infile).convert('RGB') + + # scale input image to output resolution and filter smaller images + if min(image.size) < config.cropping.out_min_size: + handle_small_mode = SmallMode(config.cropping.handle_small_mode) + if handle_small_mode == SmallMode.DROP: + continue + elif handle_small_mode == SmallMode.UPSCALE: + factor = config.cropping.out_min_size / min(image.size) + out_size = (np.array(image.size) * factor).round().astype('uint32') + image = image.resize(out_size, resample=Image.BICUBIC) + else: + factor = config.cropping.out_min_size / min(image.size) + out_size = (np.array(image.size) * factor).round().astype('uint32') + image = image.resize(out_size, resample=Image.BICUBIC) + + # generate and select masks + src_masks = mask_generator.get_masks(image) + + filtered_image_mask_pairs = [] + for cur_mask in src_masks: + if config.cropping.out_square_crop: + (crop_left, + crop_top, + crop_right, + crop_bottom) = propose_random_square_crop(cur_mask, + min_overlap=config.cropping.crop_min_overlap) + cur_mask = cur_mask[crop_top:crop_bottom, crop_left:crop_right] + cur_image = image.copy().crop((crop_left, crop_top, crop_right, crop_bottom)) + else: + cur_image = image + + if len(np.unique(cur_mask)) == 0 or cur_mask.mean() > max_tamper_area: + continue + + filtered_image_mask_pairs.append((cur_image, cur_mask)) + + mask_indices = np.random.choice(len(filtered_image_mask_pairs), + size=min(len(filtered_image_mask_pairs), config.max_masks_per_image), + replace=False) + + # crop masks; save masks together with input image + mask_basename = os.path.join(outdir, os.path.splitext(file_relpath)[0]) + for i, idx in enumerate(mask_indices): + cur_image, cur_mask = filtered_image_mask_pairs[idx] + cur_basename = mask_basename + f'_crop{i:03d}' + Image.fromarray(np.clip(cur_mask * 255, 0, 255).astype('uint8'), + mode='L').save(cur_basename + f'_mask{i:03d}.png') + cur_image.save(cur_basename + '.png') + except KeyboardInterrupt: + return + except Exception as ex: + print(f'Could not make masks for {infile} due to {ex}:\n{traceback.format_exc()}') + + +def main(args): + if not args.indir.endswith('/'): + 
args.indir += '/' + + os.makedirs(args.outdir, exist_ok=True) + + config = load_yaml(args.config) + + in_files = list(glob.glob(os.path.join(args.indir, '**', f'*.{args.ext}'), recursive=True)) + if args.n_jobs == 0: + process_images(in_files, args.indir, args.outdir, config) + else: + in_files_n = len(in_files) + chunk_size = in_files_n // args.n_jobs + (1 if in_files_n % args.n_jobs > 0 else 0) + Parallel(n_jobs=args.n_jobs)( + delayed(process_images)(in_files[start:start+chunk_size], args.indir, args.outdir, config) + for start in range(0, len(in_files), chunk_size) + ) + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('config', type=str, help='Path to config for dataset generation') + aparser.add_argument('indir', type=str, help='Path to folder with images') + aparser.add_argument('outdir', type=str, help='Path to folder to store aligned images and masks to') + aparser.add_argument('--n-jobs', type=int, default=0, help='How many processes to use') + aparser.add_argument('--ext', type=str, default='jpg', help='Input image extension') + + main(aparser.parse_args()) diff --git a/lama/bin/gen_mask_dataset_hydra.py b/lama/bin/gen_mask_dataset_hydra.py new file mode 100755 index 0000000000000000000000000000000000000000..4f4fdea52315f24f83fbd802e51a1815097d0fcb --- /dev/null +++ b/lama/bin/gen_mask_dataset_hydra.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python3 + +import glob +import os +import shutil +import traceback +import hydra +from omegaconf import OmegaConf + +import PIL.Image as Image +import numpy as np +from joblib import Parallel, delayed + +from saicinpainting.evaluation.masks.mask import SegmentationMask, propose_random_square_crop +from saicinpainting.evaluation.utils import load_yaml, SmallMode +from saicinpainting.training.data.masks import MixedMaskGenerator + + +class MakeManyMasksWrapper: + def __init__(self, impl, variants_n=2): + self.impl = impl + self.variants_n = variants_n + + def get_masks(self, img): + img = np.transpose(np.array(img), (2, 0, 1)) + return [self.impl(img)[0] for _ in range(self.variants_n)] + + +def process_images(src_images, indir, outdir, config): + if config.generator_kind == 'segmentation': + mask_generator = SegmentationMask(**config.mask_generator_kwargs) + elif config.generator_kind == 'random': + mask_generator_kwargs = OmegaConf.to_container(config.mask_generator_kwargs, resolve=True) + variants_n = mask_generator_kwargs.pop('variants_n', 2) + mask_generator = MakeManyMasksWrapper(MixedMaskGenerator(**mask_generator_kwargs), + variants_n=variants_n) + else: + raise ValueError(f'Unexpected generator kind: {config.generator_kind}') + + max_tamper_area = config.get('max_tamper_area', 1) + + for infile in src_images: + try: + file_relpath = infile[len(indir):] + img_outpath = os.path.join(outdir, file_relpath) + os.makedirs(os.path.dirname(img_outpath), exist_ok=True) + + image = Image.open(infile).convert('RGB') + + # scale input image to output resolution and filter smaller images + if min(image.size) < config.cropping.out_min_size: + handle_small_mode = SmallMode(config.cropping.handle_small_mode) + if handle_small_mode == SmallMode.DROP: + continue + elif handle_small_mode == SmallMode.UPSCALE: + factor = config.cropping.out_min_size / min(image.size) + out_size = (np.array(image.size) * factor).round().astype('uint32') + image = image.resize(out_size, resample=Image.BICUBIC) + else: + factor = config.cropping.out_min_size / min(image.size) + out_size = (np.array(image.size) * 
factor).round().astype('uint32') + image = image.resize(out_size, resample=Image.BICUBIC) + + # generate and select masks + src_masks = mask_generator.get_masks(image) + + filtered_image_mask_pairs = [] + for cur_mask in src_masks: + if config.cropping.out_square_crop: + (crop_left, + crop_top, + crop_right, + crop_bottom) = propose_random_square_crop(cur_mask, + min_overlap=config.cropping.crop_min_overlap) + cur_mask = cur_mask[crop_top:crop_bottom, crop_left:crop_right] + cur_image = image.copy().crop((crop_left, crop_top, crop_right, crop_bottom)) + else: + cur_image = image + + if len(np.unique(cur_mask)) == 0 or cur_mask.mean() > max_tamper_area: + continue + + filtered_image_mask_pairs.append((cur_image, cur_mask)) + + mask_indices = np.random.choice(len(filtered_image_mask_pairs), + size=min(len(filtered_image_mask_pairs), config.max_masks_per_image), + replace=False) + + # crop masks; save masks together with input image + mask_basename = os.path.join(outdir, os.path.splitext(file_relpath)[0]) + for i, idx in enumerate(mask_indices): + cur_image, cur_mask = filtered_image_mask_pairs[idx] + cur_basename = mask_basename + f'_crop{i:03d}' + Image.fromarray(np.clip(cur_mask * 255, 0, 255).astype('uint8'), + mode='L').save(cur_basename + f'_mask{i:03d}.png') + cur_image.save(cur_basename + '.png') + except KeyboardInterrupt: + return + except Exception as ex: + print(f'Could not make masks for {infile} due to {ex}:\n{traceback.format_exc()}') + + +@hydra.main(config_path='../configs/data_gen/whydra', config_name='random_medium_256.yaml') +def main(config: OmegaConf): + if not config.indir.endswith('/'): + config.indir += '/' + + os.makedirs(config.outdir, exist_ok=True) + + in_files = list(glob.glob(os.path.join(config.indir, '**', f'*.{config.location.extension}'), + recursive=True)) + if config.n_jobs == 0: + process_images(in_files, config.indir, config.outdir, config) + else: + in_files_n = len(in_files) + chunk_size = in_files_n // config.n_jobs + (1 if in_files_n % config.n_jobs > 0 else 0) + Parallel(n_jobs=config.n_jobs)( + delayed(process_images)(in_files[start:start+chunk_size], config.indir, config.outdir, config) + for start in range(0, len(in_files), chunk_size) + ) + + +if __name__ == '__main__': + main() diff --git a/lama/bin/gen_outpainting_dataset.py b/lama/bin/gen_outpainting_dataset.py new file mode 100755 index 0000000000000000000000000000000000000000..72f6fc16c372fbc0aec9643c7be1c44ce5efeba4 --- /dev/null +++ b/lama/bin/gen_outpainting_dataset.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 +import glob +import logging +import os +import shutil +import sys +import traceback + +from saicinpainting.evaluation.data import load_image +from saicinpainting.evaluation.utils import move_to_device + +os.environ['OMP_NUM_THREADS'] = '1' +os.environ['OPENBLAS_NUM_THREADS'] = '1' +os.environ['MKL_NUM_THREADS'] = '1' +os.environ['VECLIB_MAXIMUM_THREADS'] = '1' +os.environ['NUMEXPR_NUM_THREADS'] = '1' + +import cv2 +import hydra +import numpy as np +import torch +import tqdm +import yaml +from omegaconf import OmegaConf +from torch.utils.data._utils.collate import default_collate + +from saicinpainting.training.data.datasets import make_default_val_dataset +from saicinpainting.training.trainers import load_checkpoint +from saicinpainting.utils import register_debug_signal_handlers + +LOGGER = logging.getLogger(__name__) + + +def main(args): + try: + if not args.indir.endswith('/'): + args.indir += '/' + + for in_img in glob.glob(os.path.join(args.indir, '**', '*' + args.img_suffix), 
recursive=True): + if 'mask' in os.path.basename(in_img): + continue + + out_img_path = os.path.join(args.outdir, os.path.splitext(in_img[len(args.indir):])[0] + '.png') + out_mask_path = f'{os.path.splitext(out_img_path)[0]}_mask.png' + + os.makedirs(os.path.dirname(out_img_path), exist_ok=True) + + img = load_image(in_img) + height, width = img.shape[1:] + pad_h, pad_w = int(height * args.coef / 2), int(width * args.coef / 2) + + mask = np.zeros((height, width), dtype='uint8') + + if args.expand: + img = np.pad(img, ((0, 0), (pad_h, pad_h), (pad_w, pad_w))) + mask = np.pad(mask, ((pad_h, pad_h), (pad_w, pad_w)), mode='constant', constant_values=255) + else: + mask[:pad_h] = 255 + mask[-pad_h:] = 255 + mask[:, :pad_w] = 255 + mask[:, -pad_w:] = 255 + + # img = np.pad(img, ((0, 0), (pad_h * 2, pad_h * 2), (pad_w * 2, pad_w * 2)), mode='symmetric') + # mask = np.pad(mask, ((pad_h * 2, pad_h * 2), (pad_w * 2, pad_w * 2)), mode = 'symmetric') + + img = np.clip(np.transpose(img, (1, 2, 0)) * 255, 0, 255).astype('uint8') + img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + cv2.imwrite(out_img_path, img) + + cv2.imwrite(out_mask_path, mask) + except KeyboardInterrupt: + LOGGER.warning('Interrupted by user') + except Exception as ex: + LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}') + sys.exit(1) + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('indir', type=str, help='Root directory with images') + aparser.add_argument('outdir', type=str, help='Where to store results') + aparser.add_argument('--img-suffix', type=str, default='.png', help='Input image extension') + aparser.add_argument('--expand', action='store_true', help='Generate mask by padding (true) or by cropping (false)') + aparser.add_argument('--coef', type=float, default=0.2, help='How much to crop/expand in order to get masks') + + main(aparser.parse_args()) diff --git a/lama/bin/make_checkpoint.py b/lama/bin/make_checkpoint.py new file mode 100755 index 0000000000000000000000000000000000000000..322147483915bef758770ae931e705e56083fa8d --- /dev/null +++ b/lama/bin/make_checkpoint.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 + +import os +import shutil + +import torch + + +def get_checkpoint_files(s): + s = s.strip() + if ',' in s: + return [get_checkpoint_files(chunk) for chunk in s.split(',')] + return 'last.ckpt' if s == 'last' else f'{s}.ckpt' + + +def main(args): + checkpoint_fnames = get_checkpoint_files(args.epochs) + if isinstance(checkpoint_fnames, str): + checkpoint_fnames = [checkpoint_fnames] + assert len(checkpoint_fnames) >= 1 + + checkpoint_path = os.path.join(args.indir, 'models', checkpoint_fnames[0]) + checkpoint = torch.load(checkpoint_path, map_location='cpu') + del checkpoint['optimizer_states'] + + if len(checkpoint_fnames) > 1: + for fname in checkpoint_fnames[1:]: + print('sum', fname) + sum_tensors_cnt = 0 + other_cp = torch.load(os.path.join(args.indir, 'models', fname), map_location='cpu') + for k in checkpoint['state_dict'].keys(): + if checkpoint['state_dict'][k].dtype is torch.float: + checkpoint['state_dict'][k].data.add_(other_cp['state_dict'][k].data) + sum_tensors_cnt += 1 + print('summed', sum_tensors_cnt, 'tensors') + + for k in checkpoint['state_dict'].keys(): + if checkpoint['state_dict'][k].dtype is torch.float: + checkpoint['state_dict'][k].data.mul_(1 / float(len(checkpoint_fnames))) + + state_dict = checkpoint['state_dict'] + + if not args.leave_discriminators: + for k in list(state_dict.keys()): + if 
k.startswith('discriminator.'): + del state_dict[k] + + if not args.leave_losses: + for k in list(state_dict.keys()): + if k.startswith('loss_'): + del state_dict[k] + + out_checkpoint_path = os.path.join(args.outdir, 'models', 'best.ckpt') + os.makedirs(os.path.dirname(out_checkpoint_path), exist_ok=True) + + torch.save(checkpoint, out_checkpoint_path) + + shutil.copy2(os.path.join(args.indir, 'config.yaml'), + os.path.join(args.outdir, 'config.yaml')) + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('indir', + help='Path to directory with output of training ' + '(i.e. directory, which has samples, modules, config.yaml and train.log') + aparser.add_argument('outdir', + help='Where to put minimal checkpoint, which can be consumed by "bin/predict.py"') + aparser.add_argument('--epochs', type=str, default='last', + help='Which checkpoint to take. ' + 'Can be "last" or integer - number of epoch') + aparser.add_argument('--leave-discriminators', action='store_true', + help='If enabled, the state of discriminators will not be removed from the checkpoint') + aparser.add_argument('--leave-losses', action='store_true', + help='If enabled, weights of nn-based losses (e.g. perceptual) will not be removed') + + main(aparser.parse_args()) diff --git a/lama/bin/mask_example.py b/lama/bin/mask_example.py new file mode 100644 index 0000000000000000000000000000000000000000..59e25ca8eb3ed4141851c3af284fc66285444de0 --- /dev/null +++ b/lama/bin/mask_example.py @@ -0,0 +1,14 @@ +import matplotlib.pyplot as plt +from skimage import io +from skimage.transform import resize + +from saicinpainting.evaluation.masks.mask import SegmentationMask + +im = io.imread('imgs/ex4.jpg') +im = resize(im, (512, 1024), anti_aliasing=True) +mask_seg = SegmentationMask(num_variants_per_mask=10) +mask_examples = mask_seg.get_masks(im) +for i, example in enumerate(mask_examples): + plt.imshow(example) + plt.show() + plt.imsave(f'tmp/img_masks/{i}.png', example) diff --git a/lama/bin/paper_runfiles/blur_tests.sh b/lama/bin/paper_runfiles/blur_tests.sh new file mode 100755 index 0000000000000000000000000000000000000000..8f204a4c643d08935e5561ed27a286536643958d --- /dev/null +++ b/lama/bin/paper_runfiles/blur_tests.sh @@ -0,0 +1,37 @@ +##!/usr/bin/env bash +# +## !!! 
file set to make test_large_30k from the vanilla test_large: configs/test_large_30k.lst +# +## paths to data are valid for mml7 +#PLACES_ROOT="/data/inpainting/Places365" +#OUT_DIR="/data/inpainting/paper_data/Places365_val_test" +# +#source "$(dirname $0)/env.sh" +# +#for datadir in test_large_30k # val_large +#do +# for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512 +# do +# "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \ +# "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 8 +# +# "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" +# done +# +# for conf in segm_256 segm_512 +# do +# "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \ +# "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 2 +# +# "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" +# done +#done +# +#IN_DIR="/data/inpainting/paper_data/Places365_val_test/test_large_30k/random_medium_512" +#PRED_DIR="/data/inpainting/predictions/final/images/r.suvorov_2021-03-05_17-08-35_train_ablv2_work_resume_epoch37/random_medium_512" +#BLUR_OUT_DIR="/data/inpainting/predictions/final/blur/images" +# +#for b in 0.1 +# +#"$BINDIR/blur_predicts.py" "$BASEDIR/../../configs/eval2.yaml" "$CUR_IN_DIR" "$CUR_OUT_DIR" "$CUR_EVAL_DIR" +# diff --git a/lama/bin/paper_runfiles/env.sh b/lama/bin/paper_runfiles/env.sh new file mode 100644 index 0000000000000000000000000000000000000000..f3052f0ea1672a569e7775f8c54967d730a7b5ec --- /dev/null +++ b/lama/bin/paper_runfiles/env.sh @@ -0,0 +1,8 @@ +DIRNAME="$(dirname $0)" +DIRNAME="$(realpath ""$DIRNAME"")" + +BINDIR="$DIRNAME/.." +SRCDIR="$BINDIR/.." 
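+# SRCDIR resolves to the lama project root, so configs/ and the saicinpainting package can be found via the paths below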
+CONFIGDIR="$SRCDIR/configs" + +export PYTHONPATH="$SRCDIR:$PYTHONPATH" diff --git a/lama/bin/paper_runfiles/find_best_checkpoint.py b/lama/bin/paper_runfiles/find_best_checkpoint.py new file mode 100755 index 0000000000000000000000000000000000000000..42f5e0f9bb1a2ea25dd9a97a58cf318e6de19532 --- /dev/null +++ b/lama/bin/paper_runfiles/find_best_checkpoint.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 + + +import os +from argparse import ArgumentParser + + +def ssim_fid100_f1(metrics, fid_scale=100): + ssim = metrics.loc['total', 'ssim']['mean'] + fid = metrics.loc['total', 'fid']['mean'] + fid_rel = max(0, fid_scale - fid) / fid_scale + f1 = 2 * ssim * fid_rel / (ssim + fid_rel + 1e-3) + return f1 + + +def find_best_checkpoint(model_list, models_dir): + with open(model_list) as f: + models = [m.strip() for m in f.readlines()] + with open(f'{model_list}_best', 'w') as f: + for model in models: + print(model) + best_f1 = 0 + best_epoch = 0 + best_step = 0 + with open(os.path.join(models_dir, model, 'train.log')) as fm: + lines = fm.readlines() + for line_index in range(len(lines)): + line = lines[line_index] + if 'Validation metrics after epoch' in line: + sharp_index = line.index('#') + cur_ep = line[sharp_index + 1:] + comma_index = cur_ep.index(',') + cur_ep = int(cur_ep[:comma_index]) + total_index = line.index('total ') + step = int(line[total_index:].split()[1].strip()) + total_line = lines[line_index + 5] + if not total_line.startswith('total'): + continue + words = total_line.strip().split() + f1 = float(words[-1]) + print(f'\tEpoch: {cur_ep}, f1={f1}') + if f1 > best_f1: + best_f1 = f1 + best_epoch = cur_ep + best_step = step + f.write(f'{model}\t{best_epoch}\t{best_step}\t{best_f1}\n') + + +if __name__ == '__main__': + parser = ArgumentParser() + parser.add_argument('model_list') + parser.add_argument('models_dir') + args = parser.parse_args() + find_best_checkpoint(args.model_list, args.models_dir) diff --git a/lama/bin/paper_runfiles/generate_test_celeba-hq.sh b/lama/bin/paper_runfiles/generate_test_celeba-hq.sh new file mode 100755 index 0000000000000000000000000000000000000000..7e04bba426f1c6c0528d88a0e28a5da0dde7ca3e --- /dev/null +++ b/lama/bin/paper_runfiles/generate_test_celeba-hq.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# paths to data are valid for mml-ws01 +OUT_DIR="/media/inpainting/paper_data/CelebA-HQ_val_test" + +source "$(dirname $0)/env.sh" + +for datadir in "val" "test" +do + for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512 + do + "$BINDIR/gen_mask_dataset_hydra.py" -cn $conf datadir=$datadir location=mml-ws01-celeba-hq \ + location.out_dir=$OUT_DIR cropping.out_square_crop=False + + "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" + done +done diff --git a/lama/bin/paper_runfiles/generate_test_ffhq.sh b/lama/bin/paper_runfiles/generate_test_ffhq.sh new file mode 100755 index 0000000000000000000000000000000000000000..a1b79cb0f3f710eed21a978c3a1489ca830bb7f8 --- /dev/null +++ b/lama/bin/paper_runfiles/generate_test_ffhq.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# paths to data are valid for mml-ws01 +OUT_DIR="/media/inpainting/paper_data/FFHQ_val" + +source "$(dirname $0)/env.sh" + +for datadir in test +do + for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512 + do + "$BINDIR/gen_mask_dataset_hydra.py" -cn $conf datadir=$datadir location=mml-ws01-ffhq \ + location.out_dir=$OUT_DIR 
cropping.out_square_crop=False + + "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" + done +done diff --git a/lama/bin/paper_runfiles/generate_test_paris.sh b/lama/bin/paper_runfiles/generate_test_paris.sh new file mode 100755 index 0000000000000000000000000000000000000000..66056017c3aa376ef0767a59583ab25a321b559b --- /dev/null +++ b/lama/bin/paper_runfiles/generate_test_paris.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# paths to data are valid for mml-ws01 +OUT_DIR="/media/inpainting/paper_data/Paris_StreetView_Dataset_val" + +source "$(dirname $0)/env.sh" + +for datadir in paris_eval_gt +do + for conf in random_thin_256 random_medium_256 random_thick_256 segm_256 + do + "$BINDIR/gen_mask_dataset_hydra.py" -cn $conf datadir=$datadir location=mml-ws01-paris \ + location.out_dir=$OUT_DIR cropping.out_square_crop=False cropping.out_min_size=227 + + "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" + done +done diff --git a/lama/bin/paper_runfiles/generate_test_paris_256.sh b/lama/bin/paper_runfiles/generate_test_paris_256.sh new file mode 100755 index 0000000000000000000000000000000000000000..67061298b601ce4e1c37966852421f2153a0d686 --- /dev/null +++ b/lama/bin/paper_runfiles/generate_test_paris_256.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# paths to data are valid for mml-ws01 +OUT_DIR="/media/inpainting/paper_data/Paris_StreetView_Dataset_val_256" + +source "$(dirname $0)/env.sh" + +for datadir in paris_eval_gt +do + for conf in random_thin_256 random_medium_256 random_thick_256 segm_256 + do + "$BINDIR/gen_mask_dataset_hydra.py" -cn $conf datadir=$datadir location=mml-ws01-paris \ + location.out_dir=$OUT_DIR cropping.out_square_crop=False cropping.out_min_size=256 + + "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" + done +done diff --git a/lama/bin/paper_runfiles/generate_val_test.sh b/lama/bin/paper_runfiles/generate_val_test.sh new file mode 100755 index 0000000000000000000000000000000000000000..d9b2a370ceeeb8f401706f4303298db13e5fad91 --- /dev/null +++ b/lama/bin/paper_runfiles/generate_val_test.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +# !!!
file set to make test_large_30k from the vanilla test_large: configs/test_large_30k.lst + +# paths to data are valid for mml7 +PLACES_ROOT="/data/inpainting/Places365" +OUT_DIR="/data/inpainting/paper_data/Places365_val_test" + +source "$(dirname $0)/env.sh" + +for datadir in test_large_30k # val_large +do + for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512 + do + "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \ + "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 8 + + "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" + done + + for conf in segm_256 segm_512 + do + "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \ + "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 2 + + "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" + done +done diff --git a/lama/bin/paper_runfiles/predict_inner_features.sh b/lama/bin/paper_runfiles/predict_inner_features.sh new file mode 100755 index 0000000000000000000000000000000000000000..864c1a0fca8b93b2a193656e45ff55f6a051eb8c --- /dev/null +++ b/lama/bin/paper_runfiles/predict_inner_features.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# paths to data are valid for mml7 + +source "$(dirname $0)/env.sh" + +"$BINDIR/predict_inner_features.py" \ + -cn default_inner_features_ffc \ + model.path="/data/inpainting/paper_data/final_models/ours/r.suvorov_2021-03-05_17-34-05_train_ablv2_work_ffc075_resume_epoch39" \ + indir="/data/inpainting/paper_data/inner_features_vis/input/" \ + outdir="/data/inpainting/paper_data/inner_features_vis/output/ffc" \ + dataset.img_suffix=.png + + +"$BINDIR/predict_inner_features.py" \ + -cn default_inner_features_work \ + model.path="/data/inpainting/paper_data/final_models/ours/r.suvorov_2021-03-05_17-08-35_train_ablv2_work_resume_epoch37" \ + indir="/data/inpainting/paper_data/inner_features_vis/input/" \ + outdir="/data/inpainting/paper_data/inner_features_vis/output/work" \ + dataset.img_suffix=.png diff --git a/lama/bin/paper_runfiles/update_test_data_stats.sh b/lama/bin/paper_runfiles/update_test_data_stats.sh new file mode 100755 index 0000000000000000000000000000000000000000..ff77d586f308202fbd019d8cc4be641f0d6aa1a5 --- /dev/null +++ b/lama/bin/paper_runfiles/update_test_data_stats.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# paths to data are valid for mml7 + +source "$(dirname $0)/env.sh" + +#INDIR="/data/inpainting/paper_data/Places365_val_test/test_large_30k" +# +#for dataset in random_medium_256 random_medium_512 random_thick_256 random_thick_512 random_thin_256 random_thin_512 +#do +# "$BINDIR/calc_dataset_stats.py" "$INDIR/$dataset" "$INDIR/${dataset}_stats2" +#done +# +#"$BINDIR/calc_dataset_stats.py" "/data/inpainting/evalset2" "/data/inpainting/evalset2_stats2" + + +INDIR="/data/inpainting/paper_data/CelebA-HQ_val_test/test" + +for dataset in random_medium_256 random_thick_256 random_thin_256 +do + "$BINDIR/calc_dataset_stats.py" "$INDIR/$dataset" "$INDIR/${dataset}_stats2" +done + + +INDIR="/data/inpainting/paper_data/Paris_StreetView_Dataset_val_256/paris_eval_gt" + +for dataset in random_medium_256 random_thick_256 random_thin_256 +do + "$BINDIR/calc_dataset_stats.py" "$INDIR/$dataset" "$INDIR/${dataset}_stats2" +done \ No newline at end of file diff --git a/lama/bin/predict.py b/lama/bin/predict.py new file mode 100755 index 
0000000000000000000000000000000000000000..66d735094db39bb27fe2353316ae847e5c8b0fe8 --- /dev/null +++ b/lama/bin/predict.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 + +# Example command: +# ./bin/predict.py \ +# model.path= \ +# indir= \ +# outdir= + +import logging +import os +import sys +import traceback +#import os +#import sys +import inspect + +currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +parentdir = os.path.dirname(currentdir) +sys.path.insert(0, parentdir) + +# from saicinpainting.evaluation.utils import move_to_device +# from saicinpainting.evaluation.refinement import refine_predict +os.environ['OMP_NUM_THREADS'] = '1' +os.environ['OPENBLAS_NUM_THREADS'] = '1' +os.environ['MKL_NUM_THREADS'] = '1' +os.environ['VECLIB_MAXIMUM_THREADS'] = '1' +os.environ['NUMEXPR_NUM_THREADS'] = '1' + +import cv2 +import hydra +import numpy as np +import torch +import tqdm +import yaml +from omegaconf import OmegaConf +from torch.utils.data._utils.collate import default_collate + +# from saicinpainting.training.data.datasets import make_default_val_dataset +# from saicinpainting.training.trainers import load_checkpoint +# from saicinpainting.utils import register_debug_signal_handlers + +LOGGER = logging.getLogger(__name__) + + +@hydra.main(config_path='../configs/prediction', config_name='default.yaml') +def main(predict_config: OmegaConf): + for k in predict_config.keys(): + print(k, predict_config[k]) +# try: +# register_debug_signal_handlers() # kill -10 will result in traceback dumped into log + +# device = torch.device(predict_config.device) + +# train_config_path = os.path.join(predict_config.model.path, 'config.yaml') +# with open(train_config_path, 'r') as f: +# train_config = OmegaConf.create(yaml.safe_load(f)) + +# train_config.training_model.predict_only = True +# train_config.visualizer.kind = 'noop' + +# out_ext = predict_config.get('out_ext', '.png') + +# checkpoint_path = os.path.join(predict_config.model.path, +# 'models', +# predict_config.model.checkpoint) +# model = load_checkpoint(train_config, checkpoint_path, strict=False, map_location='cpu') +# model.freeze() +# if not predict_config.get('refine', False): +# model.to(device) + +# if not predict_config.indir.endswith('/'): +# predict_config.indir += '/' + +# dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset) +# for img_i in tqdm.trange(len(dataset)): +# mask_fname = dataset.mask_filenames[img_i] +# cur_out_fname = os.path.join( +# predict_config.outdir, +# os.path.splitext(mask_fname[len(predict_config.indir):])[0] + out_ext +# ) +# os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True) +# batch = default_collate([dataset[img_i]]) +# if predict_config.get('refine', False): +# assert 'unpad_to_size' in batch, "Unpadded size is required for the refinement" +# # image unpadding is taken care of in the refiner, so that output image +# # is same size as the input image +# cur_res = refine_predict(batch, model, **predict_config.refiner) +# cur_res = cur_res[0].permute(1,2,0).detach().cpu().numpy() +# else: +# with torch.no_grad(): +# batch = move_to_device(batch, device) +# batch['mask'] = (batch['mask'] > 0) * 1 +# batch = model(batch) +# cur_res = batch[predict_config.out_key][0].permute(1, 2, 0).detach().cpu().numpy() +# unpad_to_size = batch.get('unpad_to_size', None) +# if unpad_to_size is not None: +# orig_height, orig_width = unpad_to_size +# cur_res = cur_res[:orig_height, :orig_width] + +# cur_res = np.clip(cur_res * 255, 0, 255).astype('uint8') 
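+# (note: cv2.imwrite expects BGR channel order, hence the RGB-to-BGR conversion below before saving)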
+# cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR) +# cv2.imwrite(cur_out_fname, cur_res) + +# except KeyboardInterrupt: +# LOGGER.warning('Interrupted by user') +# except Exception as ex: +# LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}') +# sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/lama/bin/predict_inner_features.py b/lama/bin/predict_inner_features.py new file mode 100755 index 0000000000000000000000000000000000000000..4f9f7a11a6c4757a4eaa05cf1ac648d372f7e02f --- /dev/null +++ b/lama/bin/predict_inner_features.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 + +# Example command: +# ./bin/predict.py \ +# model.path= \ +# indir= \ +# outdir= + +import logging +import os +import sys +import traceback + +from saicinpainting.evaluation.utils import move_to_device + +os.environ['OMP_NUM_THREADS'] = '1' +os.environ['OPENBLAS_NUM_THREADS'] = '1' +os.environ['MKL_NUM_THREADS'] = '1' +os.environ['VECLIB_MAXIMUM_THREADS'] = '1' +os.environ['NUMEXPR_NUM_THREADS'] = '1' + +import cv2 +import hydra +import numpy as np +import torch +import tqdm +import yaml +from omegaconf import OmegaConf +from torch.utils.data._utils.collate import default_collate + +from saicinpainting.training.data.datasets import make_default_val_dataset +from saicinpainting.training.trainers import load_checkpoint, DefaultInpaintingTrainingModule +from saicinpainting.utils import register_debug_signal_handlers, get_shape + +LOGGER = logging.getLogger(__name__) + + +@hydra.main(config_path='../configs/prediction', config_name='default_inner_features.yaml') +def main(predict_config: OmegaConf): + try: + register_debug_signal_handlers() # kill -10 will result in traceback dumped into log + + device = torch.device(predict_config.device) + + train_config_path = os.path.join(predict_config.model.path, 'config.yaml') + with open(train_config_path, 'r') as f: + train_config = OmegaConf.create(yaml.safe_load(f)) + + checkpoint_path = os.path.join(predict_config.model.path, 'models', predict_config.model.checkpoint) + model = load_checkpoint(train_config, checkpoint_path, strict=False) + model.freeze() + model.to(device) + + assert isinstance(model, DefaultInpaintingTrainingModule), 'Only DefaultInpaintingTrainingModule is supported' + assert isinstance(getattr(model.generator, 'model', None), torch.nn.Sequential) + + if not predict_config.indir.endswith('/'): + predict_config.indir += '/' + + dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset) + + max_level = max(predict_config.levels) + + with torch.no_grad(): + for img_i in tqdm.trange(len(dataset)): + mask_fname = dataset.mask_filenames[img_i] + cur_out_fname = os.path.join(predict_config.outdir, os.path.splitext(mask_fname[len(predict_config.indir):])[0]) + os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True) + + batch = move_to_device(default_collate([dataset[img_i]]), device) + + img = batch['image'] + mask = batch['mask'] + mask[:] = 0 + mask_h, mask_w = mask.shape[-2:] + mask[:, :, + mask_h // 2 - predict_config.hole_radius : mask_h // 2 + predict_config.hole_radius, + mask_w // 2 - predict_config.hole_radius : mask_w // 2 + predict_config.hole_radius] = 1 + + masked_img = torch.cat([img * (1 - mask), mask], dim=1) + + feats = masked_img + for level_i, level in enumerate(model.generator.model): + feats = level(feats) + if level_i in predict_config.levels: + cur_feats = torch.cat([f for f in feats if torch.is_tensor(f)], dim=1) \ + if isinstance(feats, tuple) else feats + + if 
predict_config.slice_channels: + cur_feats = cur_feats[:, slice(*predict_config.slice_channels)] + + cur_feat = cur_feats.pow(2).mean(1).pow(0.5).clone() + cur_feat -= cur_feat.min() + cur_feat /= cur_feat.std() + cur_feat = cur_feat.clamp(0, 1) / 1 + cur_feat = cur_feat.cpu().numpy()[0] + cur_feat *= 255 + cur_feat = np.clip(cur_feat, 0, 255).astype('uint8') + cv2.imwrite(cur_out_fname + f'_lev{level_i:02d}_norm.png', cur_feat) + + # for channel_i in predict_config.channels: + # + # cur_feat = cur_feats[0, channel_i].clone().detach().cpu().numpy() + # cur_feat -= cur_feat.min() + # cur_feat /= cur_feat.max() + # cur_feat *= 255 + # cur_feat = np.clip(cur_feat, 0, 255).astype('uint8') + # cv2.imwrite(cur_out_fname + f'_lev{level_i}_ch{channel_i}.png', cur_feat) + elif level_i >= max_level: + break + except KeyboardInterrupt: + LOGGER.warning('Interrupted by user') + except Exception as ex: + LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}') + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/lama/bin/report_from_tb.py b/lama/bin/report_from_tb.py new file mode 100755 index 0000000000000000000000000000000000000000..9a444e6cd8027f88bd34adfc0b1dd000bbb4b2be --- /dev/null +++ b/lama/bin/report_from_tb.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 + +import glob +import os +import re + +import tensorflow as tf +from torch.utils.tensorboard import SummaryWriter + + +GROUPING_RULES = [ + re.compile(r'^(?P<group>train|test|val|extra_val_.*?(256|512))_(?P<title>.*)', re.I) +] + + +DROP_RULES = [ + re.compile(r'_std$', re.I) +] + + +def need_drop(tag): + for rule in DROP_RULES: + if rule.search(tag): + return True + return False + + +def get_group_and_title(tag): + for rule in GROUPING_RULES: + match = rule.search(tag) + if match is None: + continue + return match.group('group'), match.group('title') + return None, None + + +def main(args): + os.makedirs(args.outdir, exist_ok=True) + + ignored_events = set() + + for orig_fname in glob.glob(args.inglob): + cur_dirpath = os.path.dirname(orig_fname) # remove filename, this should point to "version_0" directory + subdirname = os.path.basename(cur_dirpath) # == "version_0" most of time + exp_root_path = os.path.dirname(cur_dirpath) # remove "version_0" + exp_name = os.path.basename(exp_root_path) + + writers_by_group = {} + + for e in tf.compat.v1.train.summary_iterator(orig_fname): + for v in e.summary.value: + if need_drop(v.tag): + continue + + cur_group, cur_title = get_group_and_title(v.tag) + if cur_group is None: + if v.tag not in ignored_events: + print(f'WARNING: Could not detect group for {v.tag}, ignoring it') + ignored_events.add(v.tag) + continue + + cur_writer = writers_by_group.get(cur_group, None) + if cur_writer is None: + if args.include_version: + cur_outdir = os.path.join(args.outdir, exp_name, f'{subdirname}_{cur_group}') + else: + cur_outdir = os.path.join(args.outdir, exp_name, cur_group) + cur_writer = SummaryWriter(cur_outdir) + writers_by_group[cur_group] = cur_writer + + cur_writer.add_scalar(cur_title, v.simple_value, global_step=e.step, walltime=e.wall_time) + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('inglob', type=str) + aparser.add_argument('outdir', type=str) + aparser.add_argument('--include-version', action='store_true', + help='Include subdirectory name e.g.
"version_0" into output path') + + main(aparser.parse_args()) diff --git a/lama/bin/sample_from_dataset.py b/lama/bin/sample_from_dataset.py new file mode 100755 index 0000000000000000000000000000000000000000..31593b3212454dd0b6f74a39195a34b489df20a1 --- /dev/null +++ b/lama/bin/sample_from_dataset.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 + +import os + +import numpy as np +import tqdm +from skimage import io +from skimage.segmentation import mark_boundaries + +from saicinpainting.evaluation.data import InpaintingDataset +from saicinpainting.evaluation.vis import save_item_for_vis + +def save_mask_for_sidebyside(item, out_file): + mask = item['mask']# > 0.5 + if mask.ndim == 3: + mask = mask[0] + mask = np.clip(mask * 255, 0, 255).astype('uint8') + io.imsave(out_file, mask) + +def save_img_for_sidebyside(item, out_file): + img = np.transpose(item['image'], (1, 2, 0)) + img = np.clip(img * 255, 0, 255).astype('uint8') + io.imsave(out_file, img) + +def save_masked_img_for_sidebyside(item, out_file): + mask = item['mask'] + img = item['image'] + + img = (1-mask) * img + mask + img = np.transpose(img, (1, 2, 0)) + + img = np.clip(img * 255, 0, 255).astype('uint8') + io.imsave(out_file, img) + +def main(args): + dataset = InpaintingDataset(args.datadir, img_suffix='.png') + + area_bins = np.linspace(0, 1, args.area_bins + 1) + + heights = [] + widths = [] + image_areas = [] + hole_areas = [] + hole_area_percents = [] + area_bins_count = np.zeros(args.area_bins) + area_bin_titles = [f'{area_bins[i] * 100:.0f}-{area_bins[i + 1] * 100:.0f}' for i in range(args.area_bins)] + + bin2i = [[] for _ in range(args.area_bins)] + + for i, item in enumerate(tqdm.tqdm(dataset)): + h, w = item['image'].shape[1:] + heights.append(h) + widths.append(w) + full_area = h * w + image_areas.append(full_area) + hole_area = (item['mask'] == 1).sum() + hole_areas.append(hole_area) + hole_percent = hole_area / full_area + hole_area_percents.append(hole_percent) + bin_i = np.clip(np.searchsorted(area_bins, hole_percent) - 1, 0, len(area_bins_count) - 1) + area_bins_count[bin_i] += 1 + bin2i[bin_i].append(i) + + os.makedirs(args.outdir, exist_ok=True) + + for bin_i in range(args.area_bins): + bindir = os.path.join(args.outdir, area_bin_titles[bin_i]) + os.makedirs(bindir, exist_ok=True) + bin_idx = bin2i[bin_i] + for sample_i in np.random.choice(bin_idx, size=min(len(bin_idx), args.samples_n), replace=False): + item = dataset[sample_i] + path = os.path.join(bindir, dataset.img_filenames[sample_i].split('/')[-1]) + save_masked_img_for_sidebyside(item, path) + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('--datadir', type=str, + help='Path to folder with images and masks (output of gen_mask_dataset.py)') + aparser.add_argument('--outdir', type=str, help='Where to put results') + aparser.add_argument('--samples-n', type=int, default=10, + help='Number of sample images with masks to copy for visualization for each area bin') + aparser.add_argument('--area-bins', type=int, default=10, help='How many area bins to have') + + main(aparser.parse_args()) diff --git a/lama/bin/side_by_side.py b/lama/bin/side_by_side.py new file mode 100755 index 0000000000000000000000000000000000000000..8ba7a42a3b8597552b8002d1eb245d5776aff7f7 --- /dev/null +++ b/lama/bin/side_by_side.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +import os +import random + +import cv2 +import numpy as np + +from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset +from 
saicinpainting.evaluation.utils import load_yaml +from saicinpainting.training.visualizers.base import visualize_mask_and_images + + +def main(args): + config = load_yaml(args.config) + + datasets = [PrecomputedInpaintingResultsDataset(args.datadir, cur_predictdir, **config.dataset_kwargs) + for cur_predictdir in args.predictdirs] + assert len({len(ds) for ds in datasets}) == 1 + len_first = len(datasets[0]) + + indices = list(range(len_first)) + if len_first > args.max_n: + indices = sorted(random.sample(indices, args.max_n)) + + os.makedirs(args.outpath, exist_ok=True) + + filename2i = {} + + keys = ['image'] + [i for i in range(len(datasets))] + for img_i in indices: + try: + mask_fname = os.path.basename(datasets[0].mask_filenames[img_i]) + if mask_fname in filename2i: + filename2i[mask_fname] += 1 + idx = filename2i[mask_fname] + mask_fname_only, ext = os.path.splitext(mask_fname) + mask_fname = f'{mask_fname_only}_{idx}{ext}' + else: + filename2i[mask_fname] = 1 + + cur_vis_dict = datasets[0][img_i] + for ds_i, ds in enumerate(datasets): + cur_vis_dict[ds_i] = ds[img_i]['inpainted'] + + vis_img = visualize_mask_and_images(cur_vis_dict, keys, + last_without_mask=False, + mask_only_first=True, + black_mask=args.black) + vis_img = np.clip(vis_img * 255, 0, 255).astype('uint8') + + out_fname = os.path.join(args.outpath, mask_fname) + + + + vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR) + cv2.imwrite(out_fname, vis_img) + except Exception as ex: + print(f'Could not process {img_i} due to {ex}') + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('--max-n', type=int, default=100, help='Maximum number of images to print') + aparser.add_argument('--black', action='store_true', help='Whether to fill mask on GT with black') + aparser.add_argument('config', type=str, help='Path to evaluation config (e.g.
configs/eval1.yaml)') + aparser.add_argument('outpath', type=str, help='Where to put results') + aparser.add_argument('datadir', type=str, + help='Path to folder with images and masks') + aparser.add_argument('predictdirs', type=str, + nargs='+', + help='Path to folders with predicts') + + + main(aparser.parse_args()) diff --git a/lama/bin/split_tar.py b/lama/bin/split_tar.py new file mode 100755 index 0000000000000000000000000000000000000000..ac1692addbb4191200c8c871fe356bb80d534c44 --- /dev/null +++ b/lama/bin/split_tar.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 + + +import tqdm +import webdataset as wds + + +def main(args): + input_dataset = wds.Dataset(args.infile) + output_dataset = wds.ShardWriter(args.outpattern) + for rec in tqdm.tqdm(input_dataset): + output_dataset.write(rec) + + +if __name__ == '__main__': + import argparse + + aparser = argparse.ArgumentParser() + aparser.add_argument('infile', type=str) + aparser.add_argument('outpattern', type=str) + + main(aparser.parse_args()) diff --git a/lama/bin/to_jit.py b/lama/bin/to_jit.py new file mode 100644 index 0000000000000000000000000000000000000000..8acea396545cdadbc004618a23c78d60c0ed6e95 --- /dev/null +++ b/lama/bin/to_jit.py @@ -0,0 +1,75 @@ +import os +from pathlib import Path + +import hydra +import torch +import yaml +from omegaconf import OmegaConf +from torch import nn + +from saicinpainting.training.trainers import load_checkpoint +from saicinpainting.utils import register_debug_signal_handlers + + +class JITWrapper(nn.Module): + def __init__(self, model): + super().__init__() + self.model = model + + def forward(self, image, mask): + batch = { + "image": image, + "mask": mask + } + out = self.model(batch) + return out["inpainted"] + + +@hydra.main(config_path="../configs/prediction", config_name="default.yaml") +def main(predict_config: OmegaConf): + register_debug_signal_handlers() # kill -10 <pid> will result in traceback dumped into log + + train_config_path = os.path.join(predict_config.model.path, "config.yaml") + with open(train_config_path, "r") as f: + train_config = OmegaConf.create(yaml.safe_load(f)) + + train_config.training_model.predict_only = True + train_config.visualizer.kind = "noop" + + checkpoint_path = os.path.join( + predict_config.model.path, "models", predict_config.model.checkpoint + ) + model = load_checkpoint( + train_config, checkpoint_path, strict=False, map_location="cpu" + ) + model.eval() + jit_model_wrapper = JITWrapper(model) + + image = torch.rand(1, 3, 120, 120) + mask = torch.rand(1, 1, 120, 120) + output = jit_model_wrapper(image, mask) + + if torch.cuda.is_available(): + device = torch.device("cuda") + else: + device = torch.device("cpu") + + image = image.to(device) + mask = mask.to(device) + traced_model = torch.jit.trace(jit_model_wrapper, (image, mask), strict=False).to(device) + + save_path = Path(predict_config.save_path) + save_path.parent.mkdir(parents=True, exist_ok=True) + + print(f"Saving big-lama.pt model to {save_path}") + traced_model.save(save_path) + + print(f"Checking jit model output...") + jit_model = torch.jit.load(str(save_path)) + jit_output = jit_model(image, mask) + diff = (output - jit_output).abs().sum() + print(f"diff: {diff}") + + +if __name__ == "__main__": + main() diff --git a/lama/bin/train.py b/lama/bin/train.py new file mode 100755 index 0000000000000000000000000000000000000000..be9ca8c6ef2a0cb9143ab6a0f4d91f571b691a95 --- /dev/null +++ b/lama/bin/train.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 + +import logging +import os +import sys +import 
traceback + +os.environ['OMP_NUM_THREADS'] = '1' +os.environ['OPENBLAS_NUM_THREADS'] = '1' +os.environ['MKL_NUM_THREADS'] = '1' +os.environ['VECLIB_MAXIMUM_THREADS'] = '1' +os.environ['NUMEXPR_NUM_THREADS'] = '1' + +import hydra +from omegaconf import OmegaConf +from pytorch_lightning import Trainer +from pytorch_lightning.callbacks import ModelCheckpoint +from pytorch_lightning.loggers import TensorBoardLogger +from pytorch_lightning.plugins import DDPPlugin + +from saicinpainting.training.trainers import make_training_model +from saicinpainting.utils import register_debug_signal_handlers, handle_ddp_subprocess, handle_ddp_parent_process, \ + handle_deterministic_config + +LOGGER = logging.getLogger(__name__) + + +@handle_ddp_subprocess() +@hydra.main(config_path='../configs/training', config_name='tiny_test.yaml') +def main(config: OmegaConf): + try: + need_set_deterministic = handle_deterministic_config(config) + + register_debug_signal_handlers() # kill -10 <pid> will result in traceback dumped into log + + is_in_ddp_subprocess = handle_ddp_parent_process() + + config.visualizer.outdir = os.path.join(os.getcwd(), config.visualizer.outdir) + if not is_in_ddp_subprocess: + LOGGER.info(OmegaConf.to_yaml(config)) + OmegaConf.save(config, os.path.join(os.getcwd(), 'config.yaml')) + + checkpoints_dir = os.path.join(os.getcwd(), 'models') + os.makedirs(checkpoints_dir, exist_ok=True) + + # there is no need to suppress this logger in ddp, because it handles rank on its own + metrics_logger = TensorBoardLogger(config.location.tb_dir, name=os.path.basename(os.getcwd())) + metrics_logger.log_hyperparams(config) + + training_model = make_training_model(config) + + trainer_kwargs = OmegaConf.to_container(config.trainer.kwargs, resolve=True) + if need_set_deterministic: + trainer_kwargs['deterministic'] = True + + trainer = Trainer( + # there is no need to suppress checkpointing in ddp, because it handles rank on its own + callbacks=ModelCheckpoint(dirpath=checkpoints_dir, **config.trainer.checkpoint_kwargs), + logger=metrics_logger, + default_root_dir=os.getcwd(), + **trainer_kwargs + ) + trainer.fit(training_model) + except KeyboardInterrupt: + LOGGER.warning('Interrupted by user') + except Exception as ex: + LOGGER.critical(f'Training failed due to {ex}:\n{traceback.format_exc()}') + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/lama/configs/analyze_mask_errors.yaml b/lama/configs/analyze_mask_errors.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3ed3924290c05369db65afd2380df9af48bf5d69 --- /dev/null +++ b/lama/configs/analyze_mask_errors.yaml @@ -0,0 +1,7 @@ +dataset_kwargs: + img_suffix: .jpg + inpainted_suffix: .jpg + +take_global_top: 30 +take_worst_best_top: 30 +take_overlapping_top: 30 \ No newline at end of file diff --git a/lama/configs/data_gen/random_medium_256.yaml b/lama/configs/data_gen/random_medium_256.yaml new file mode 100644 index 0000000000000000000000000000000000000000..34c7f9e7802bdaa7422034560c6a6a2238f8531b --- /dev/null +++ b/lama/configs/data_gen/random_medium_256.yaml @@ -0,0 +1,33 @@ +generator_kind: random + +mask_generator_kwargs: + irregular_proba: 1 + irregular_kwargs: + min_times: 4 + max_times: 5 + max_width: 50 + max_angle: 4 + max_len: 100 + + box_proba: 0.3 + box_kwargs: + margin: 0 + bbox_min_size: 10 + bbox_max_size: 50 + max_times: 5 + min_times: 1 + + segm_proba: 0 + squares_proba: 0 + + variants_n: 5 + +max_masks_per_image: 1 + +cropping: + out_min_size: 256 + handle_small_mode: upscale + out_square_crop: True 
+ crop_min_overlap: 1 + +max_tamper_area: 0.5 diff --git a/lama/configs/data_gen/random_medium_512.yaml b/lama/configs/data_gen/random_medium_512.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ea33832aa379f5a61c5810736184a50c36faac0 --- /dev/null +++ b/lama/configs/data_gen/random_medium_512.yaml @@ -0,0 +1,33 @@ +generator_kind: random + +mask_generator_kwargs: + irregular_proba: 1 + irregular_kwargs: + min_times: 4 + max_times: 10 + max_width: 100 + max_angle: 4 + max_len: 200 + + box_proba: 0.3 + box_kwargs: + margin: 0 + bbox_min_size: 30 + bbox_max_size: 150 + max_times: 5 + min_times: 1 + + segm_proba: 0 + squares_proba: 0 + + variants_n: 5 + +max_masks_per_image: 1 + +cropping: + out_min_size: 512 + handle_small_mode: upscale + out_square_crop: True + crop_min_overlap: 1 + +max_tamper_area: 0.5 diff --git a/lama/configs/data_gen/random_thick_256.yaml b/lama/configs/data_gen/random_thick_256.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ade0cfc5e2c56916da8c642968618617687f6db --- /dev/null +++ b/lama/configs/data_gen/random_thick_256.yaml @@ -0,0 +1,33 @@ +generator_kind: random + +mask_generator_kwargs: + irregular_proba: 1 + irregular_kwargs: + min_times: 1 + max_times: 5 + max_width: 100 + max_angle: 4 + max_len: 200 + + box_proba: 0.3 + box_kwargs: + margin: 10 + bbox_min_size: 30 + bbox_max_size: 150 + max_times: 3 + min_times: 1 + + segm_proba: 0 + squares_proba: 0 + + variants_n: 5 + +max_masks_per_image: 1 + +cropping: + out_min_size: 256 + handle_small_mode: upscale + out_square_crop: True + crop_min_overlap: 1 + +max_tamper_area: 0.5 diff --git a/lama/configs/data_gen/random_thick_512.yaml b/lama/configs/data_gen/random_thick_512.yaml new file mode 100644 index 0000000000000000000000000000000000000000..17b7a7689cf02d094b958393aa397c49649e36f6 --- /dev/null +++ b/lama/configs/data_gen/random_thick_512.yaml @@ -0,0 +1,33 @@ +generator_kind: random + +mask_generator_kwargs: + irregular_proba: 1 + irregular_kwargs: + min_times: 1 + max_times: 5 + max_width: 250 + max_angle: 4 + max_len: 450 + + box_proba: 0.3 + box_kwargs: + margin: 10 + bbox_min_size: 30 + bbox_max_size: 300 + max_times: 4 + min_times: 1 + + segm_proba: 0 + squares_proba: 0 + + variants_n: 5 + +max_masks_per_image: 1 + +cropping: + out_min_size: 512 + handle_small_mode: upscale + out_square_crop: True + crop_min_overlap: 1 + +max_tamper_area: 0.5 diff --git a/lama/configs/data_gen/random_thin_256.yaml b/lama/configs/data_gen/random_thin_256.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0bc05cc7b60b522d915c0c08f8fa81a673535f34 --- /dev/null +++ b/lama/configs/data_gen/random_thin_256.yaml @@ -0,0 +1,25 @@ +generator_kind: random + +mask_generator_kwargs: + irregular_proba: 1 + irregular_kwargs: + min_times: 4 + max_times: 50 + max_width: 10 + max_angle: 4 + max_len: 40 + box_proba: 0 + segm_proba: 0 + squares_proba: 0 + + variants_n: 5 + +max_masks_per_image: 1 + +cropping: + out_min_size: 256 + handle_small_mode: upscale + out_square_crop: True + crop_min_overlap: 1 + +max_tamper_area: 0.5 diff --git a/lama/configs/data_gen/random_thin_512.yaml b/lama/configs/data_gen/random_thin_512.yaml new file mode 100644 index 0000000000000000000000000000000000000000..159fb64b540922b04f257cfdf433a2c9ff60dcf3 --- /dev/null +++ b/lama/configs/data_gen/random_thin_512.yaml @@ -0,0 +1,25 @@ +generator_kind: random + +mask_generator_kwargs: + irregular_proba: 1 + irregular_kwargs: + min_times: 4 + max_times: 70 + max_width: 20 + max_angle: 4 + 
max_len: 100 + box_proba: 0 + segm_proba: 0 + squares_proba: 0 + + variants_n: 5 + +max_masks_per_image: 1 + +cropping: + out_min_size: 512 + handle_small_mode: upscale + out_square_crop: True + crop_min_overlap: 1 + +max_tamper_area: 0.5 diff --git a/lama/configs/debug_mask_gen.yaml b/lama/configs/debug_mask_gen.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c4258157fffeea1d8416e71ef19defcd4184e98e --- /dev/null +++ b/lama/configs/debug_mask_gen.yaml @@ -0,0 +1,5 @@ +img_ext: .jpg + +gen_kwargs: + mask_size: 200 + step: 0.5 diff --git a/lama/configs/eval1.yaml b/lama/configs/eval1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ebe4e05fbe2b37ac5b60237e21a6dcadba011b8 --- /dev/null +++ b/lama/configs/eval1.yaml @@ -0,0 +1,6 @@ +evaluator_kwargs: + batch_size: 8 + +dataset_kwargs: + img_suffix: .png + inpainted_suffix: .jpg \ No newline at end of file diff --git a/lama/configs/eval2.yaml b/lama/configs/eval2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6dfb35faeb7b0a96e6e69d4bc32857f0e308456e --- /dev/null +++ b/lama/configs/eval2.yaml @@ -0,0 +1,7 @@ +evaluator_kwargs: + batch_size: 8 + device: cuda + +dataset_kwargs: + img_suffix: .png + inpainted_suffix: .png \ No newline at end of file diff --git a/lama/configs/eval2_cpu.yaml b/lama/configs/eval2_cpu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ba152eb7825e90b8a05b99b968380522e0d6ebee --- /dev/null +++ b/lama/configs/eval2_cpu.yaml @@ -0,0 +1,7 @@ +evaluator_kwargs: + batch_size: 8 + device: cpu + +dataset_kwargs: + img_suffix: .png + inpainted_suffix: .png \ No newline at end of file diff --git a/lama/configs/eval2_gpu.yaml b/lama/configs/eval2_gpu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ffab909ec215d6a0bc7056dabca89b44fee416c --- /dev/null +++ b/lama/configs/eval2_gpu.yaml @@ -0,0 +1,6 @@ +evaluator_kwargs: + batch_size: 8 + +dataset_kwargs: + img_suffix: .png + inpainted_suffix: .png \ No newline at end of file diff --git a/lama/configs/eval2_jpg.yaml b/lama/configs/eval2_jpg.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9ebe4e05fbe2b37ac5b60237e21a6dcadba011b8 --- /dev/null +++ b/lama/configs/eval2_jpg.yaml @@ -0,0 +1,6 @@ +evaluator_kwargs: + batch_size: 8 + +dataset_kwargs: + img_suffix: .png + inpainted_suffix: .jpg \ No newline at end of file diff --git a/lama/configs/eval2_segm.yaml b/lama/configs/eval2_segm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a01e7b0f1eff00bf57d8096dad148dd155798798 --- /dev/null +++ b/lama/configs/eval2_segm.yaml @@ -0,0 +1,10 @@ +evaluator_kwargs: + batch_size: 8 + +dataset_kwargs: + img_suffix: .png + inpainted_suffix: .png + +segmentation: + enable: True + weights_path: ${TORCH_HOME} diff --git a/lama/configs/eval2_segm_test.yaml b/lama/configs/eval2_segm_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..11ac85cbbe032002c5927fcc150e1f679d9d273a --- /dev/null +++ b/lama/configs/eval2_segm_test.yaml @@ -0,0 +1,11 @@ +evaluator_kwargs: + batch_size: 1 + +dataset_kwargs: + img_suffix: _input.png + inpainted_suffix: .png + pad_out_to_modulo: 8 + +segmentation: + enable: True + weights_path: ${TORCH_HOME} diff --git a/lama/configs/eval2_test.yaml b/lama/configs/eval2_test.yaml new file mode 100644 index 0000000000000000000000000000000000000000..970e35c81df97a4c59d04eb237127719dbbce475 --- /dev/null +++ b/lama/configs/eval2_test.yaml @@ -0,0 +1,7 @@ +evaluator_kwargs: + 
batch_size: 1 + +dataset_kwargs: + img_suffix: _input.png + inpainted_suffix: .png + pad_out_to_modulo: 8 diff --git a/lama/configs/places2-categories_157.txt b/lama/configs/places2-categories_157.txt new file mode 100644 index 0000000000000000000000000000000000000000..b717681993346d6f776c624019e5cfe46376147b --- /dev/null +++ b/lama/configs/places2-categories_157.txt @@ -0,0 +1,157 @@ +/a/airplane_cabin 1 +/a/airport_terminal 2 +/a/alcove 3 +/a/alley 4 +/a/amphitheater 5 +/a/amusement_park 7 +/a/apartment_building/outdoor 8 +/a/aqueduct 10 +/a/arcade 11 +/a/arch 12 +/a/archive 14 +/a/art_gallery 19 +/a/artists_loft 22 +/a/assembly_line 23 +/a/atrium/public 25 +/a/attic 26 +/a/auditorium 27 +/b/bakery/shop 31 +/b/balcony/exterior 32 +/b/balcony/interior 33 +/b/ballroom 35 +/b/banquet_hall 38 +/b/barndoor 41 +/b/basement 43 +/b/basketball_court/indoor 44 +/b/bathroom 45 +/b/bazaar/indoor 46 +/b/bazaar/outdoor 47 +/b/beach_house 49 +/b/bedchamber 51 +/b/bedroom 52 +/b/berth 55 +/b/boardwalk 57 +/b/boathouse 59 +/b/bookstore 60 +/b/booth/indoor 61 +/b/bow_window/indoor 63 +/b/bowling_alley 64 +/b/bridge 66 +/b/building_facade 67 +/b/bus_interior 70 +/b/bus_station/indoor 71 +/c/cabin/outdoor 74 +/c/campus 77 +/c/canal/urban 79 +/c/candy_store 80 +/c/carrousel 83 +/c/castle 84 +/c/chalet 87 +/c/childs_room 89 +/c/church/indoor 90 +/c/church/outdoor 91 +/c/closet 95 +/c/conference_center 101 +/c/conference_room 102 +/c/construction_site 103 +/c/corridor 106 +/c/cottage 107 +/c/courthouse 108 +/c/courtyard 109 +/d/delicatessen 114 +/d/department_store 115 +/d/diner/outdoor 119 +/d/dining_hall 120 +/d/dining_room 121 +/d/doorway/outdoor 123 +/d/dorm_room 124 +/d/downtown 125 +/d/driveway 127 +/e/elevator/door 129 +/e/elevator_lobby 130 +/e/elevator_shaft 131 +/e/embassy 132 +/e/entrance_hall 134 +/e/escalator/indoor 135 +/f/fastfood_restaurant 139 +/f/fire_escape 143 +/f/fire_station 144 +/f/food_court 148 +/g/galley 155 +/g/garage/outdoor 157 +/g/gas_station 158 +/g/gazebo/exterior 159 +/g/general_store/indoor 160 +/g/general_store/outdoor 161 +/g/greenhouse/outdoor 166 +/g/gymnasium/indoor 168 +/h/hangar/outdoor 170 +/h/hardware_store 172 +/h/home_office 176 +/h/home_theater 177 +/h/hospital 178 +/h/hotel/outdoor 181 +/h/hotel_room 182 +/h/house 183 +/h/hunting_lodge/outdoor 184 +/i/industrial_area 192 +/i/inn/outdoor 193 +/j/jacuzzi/indoor 195 +/j/jail_cell 196 +/k/kasbah 200 +/k/kitchen 203 +/l/laundromat 208 +/l/library/indoor 212 +/l/library/outdoor 213 +/l/lighthouse 214 +/l/living_room 215 +/l/loading_dock 216 +/l/lobby 217 +/l/lock_chamber 218 +/m/mansion 220 +/m/manufactured_home 221 +/m/mausoleum 226 +/m/medina 227 +/m/mezzanine 228 +/m/mosque/outdoor 230 +/m/movie_theater/indoor 235 +/m/museum/outdoor 237 +/n/nursery 240 +/o/oast_house 242 +/o/office 244 +/o/office_building 245 +/o/office_cubicles 246 +/p/pagoda 251 +/p/palace 252 +/p/pantry 253 +/p/parking_garage/indoor 255 +/p/parking_garage/outdoor 256 +/p/pavilion 260 +/p/pet_shop 261 +/p/porch 272 +/r/reception 280 +/r/recreation_room 281 +/r/restaurant_patio 286 +/r/rope_bridge 291 +/r/ruin 292 +/s/sauna 295 +/s/schoolhouse 296 +/s/server_room 298 +/s/shed 299 +/s/shopfront 301 +/s/shopping_mall/indoor 302 +/s/shower 303 +/s/skyscraper 307 +/s/staircase 317 +/s/storage_room 318 +/s/subway_station/platform 320 +/s/synagogue/outdoor 327 +/t/television_room 328 +/t/temple/asia 330 +/t/throne_room 331 +/t/tower 334 +/t/train_station/platform 337 +/u/utility_room 343 +/w/waiting_room 352 +/w/wet_bar 358 +/y/youth_hostel 363 \ No 
newline at end of file diff --git a/lama/configs/prediction/default.yaml b/lama/configs/prediction/default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..80fa69b29d4b0ecf209564f54817639326322ebf --- /dev/null +++ b/lama/configs/prediction/default.yaml @@ -0,0 +1,24 @@ +indir: no # to be overriden in CLI +outdir: no # to be overriden in CLI + +model: + path: no # to be overriden in CLI + checkpoint: best.ckpt + +dataset: + kind: default + img_suffix: .png + pad_out_to_modulo: 8 + +device: cuda +out_key: inpainted + +refine: False # refiner will only run if this is True +refiner: + gpu_ids: 0,1 # the GPU ids of the machine to use. If only single GPU, use: "0," + modulo: ${dataset.pad_out_to_modulo} + n_iters: 15 # number of iterations of refinement for each scale + lr: 0.002 # learning rate + min_side: 512 # all sides of image on all scales should be >= min_side / sqrt(2) + max_scales: 3 # max number of downscaling scales for the image-mask pyramid + px_budget: 1800000 # pixels budget. Any image will be resized to satisfy height*width <= px_budget \ No newline at end of file diff --git a/lama/configs/test_large_30k.lst b/lama/configs/test_large_30k.lst new file mode 100644 index 0000000000000000000000000000000000000000..55f3612e53a4cac8c68ad27b43cfadd2681127aa --- /dev/null +++ b/lama/configs/test_large_30k.lst @@ -0,0 +1,30000 @@ +Places365_test_00000001.jpg +Places365_test_00000009.jpg +Places365_test_00000016.jpg +Places365_test_00000022.jpg +Places365_test_00000035.jpg +Places365_test_00000037.jpg +Places365_test_00000040.jpg +Places365_test_00000045.jpg +Places365_test_00000052.jpg +Places365_test_00000062.jpg +Places365_test_00000069.jpg +Places365_test_00000077.jpg +Places365_test_00000098.jpg +Places365_test_00000105.jpg +Places365_test_00000131.jpg +Places365_test_00000172.jpg +Places365_test_00000187.jpg +Places365_test_00000200.jpg +Places365_test_00000262.jpg +Places365_test_00000291.jpg +Places365_test_00000294.jpg +Places365_test_00000322.jpg +Places365_test_00000328.jpg +Places365_test_00000332.jpg +Places365_test_00000358.jpg +Places365_test_00000365.jpg +Places365_test_00000371.jpg +Places365_test_00000381.jpg +Places365_test_00000431.jpg +Places365_test_00000435.jpg +Places365_test_00000456.jpg +Places365_test_00000459.jpg +Places365_test_00000469.jpg +Places365_test_00000484.jpg +Places365_test_00000490.jpg +Places365_test_00000517.jpg +Places365_test_00000545.jpg +Places365_test_00000546.jpg +Places365_test_00000555.jpg +Places365_test_00000557.jpg +Places365_test_00000569.jpg +Places365_test_00000574.jpg +Places365_test_00000607.jpg +Places365_test_00000611.jpg +Places365_test_00000614.jpg +Places365_test_00000620.jpg +Places365_test_00000629.jpg +Places365_test_00000643.jpg +Places365_test_00000650.jpg +Places365_test_00000661.jpg +Places365_test_00000670.jpg +Places365_test_00000671.jpg +Places365_test_00000676.jpg +Places365_test_00000687.jpg +Places365_test_00000694.jpg +Places365_test_00000726.jpg +Places365_test_00000730.jpg +Places365_test_00000734.jpg +Places365_test_00000739.jpg +Places365_test_00000770.jpg +Places365_test_00000775.jpg +Places365_test_00000812.jpg +Places365_test_00000844.jpg +Places365_test_00000853.jpg +Places365_test_00000855.jpg +Places365_test_00000859.jpg +Places365_test_00000870.jpg +Places365_test_00000879.jpg +Places365_test_00000885.jpg +Places365_test_00000889.jpg +Places365_test_00000891.jpg +Places365_test_00000931.jpg +Places365_test_00000940.jpg +Places365_test_00000952.jpg +Places365_test_00000990.jpg 
+Places365_test_00000994.jpg +Places365_test_00000996.jpg +Places365_test_00000997.jpg +Places365_test_00001000.jpg +Places365_test_00001035.jpg +Places365_test_00001040.jpg +Places365_test_00001044.jpg +Places365_test_00001045.jpg +Places365_test_00001062.jpg +Places365_test_00001077.jpg +Places365_test_00001083.jpg +Places365_test_00001091.jpg +Places365_test_00001096.jpg +Places365_test_00001100.jpg +Places365_test_00001114.jpg +Places365_test_00001149.jpg +Places365_test_00001151.jpg +Places365_test_00001158.jpg +Places365_test_00001181.jpg +Places365_test_00001182.jpg +Places365_test_00001191.jpg +Places365_test_00001194.jpg +Places365_test_00001197.jpg +Places365_test_00001200.jpg +Places365_test_00001205.jpg +Places365_test_00001211.jpg +Places365_test_00001216.jpg +Places365_test_00001222.jpg +Places365_test_00001224.jpg +Places365_test_00001233.jpg +Places365_test_00001245.jpg +Places365_test_00001264.jpg +Places365_test_00001268.jpg +Places365_test_00001273.jpg +Places365_test_00001283.jpg +Places365_test_00001287.jpg +Places365_test_00001306.jpg +Places365_test_00001323.jpg +Places365_test_00001327.jpg +Places365_test_00001339.jpg +Places365_test_00001345.jpg +Places365_test_00001354.jpg +Places365_test_00001371.jpg +Places365_test_00001380.jpg +Places365_test_00001393.jpg +Places365_test_00001420.jpg +Places365_test_00001443.jpg +Places365_test_00001449.jpg +Places365_test_00001452.jpg +Places365_test_00001453.jpg +Places365_test_00001463.jpg +Places365_test_00001468.jpg +Places365_test_00001503.jpg +Places365_test_00001533.jpg +Places365_test_00001540.jpg +Places365_test_00001548.jpg +Places365_test_00001566.jpg +Places365_test_00001568.jpg +Places365_test_00001579.jpg +Places365_test_00001580.jpg +Places365_test_00001589.jpg +Places365_test_00001594.jpg +Places365_test_00001602.jpg +Places365_test_00001613.jpg +Places365_test_00001622.jpg +Places365_test_00001672.jpg +Places365_test_00001679.jpg +Places365_test_00001682.jpg +Places365_test_00001690.jpg +Places365_test_00001708.jpg +Places365_test_00001716.jpg +Places365_test_00001722.jpg +Places365_test_00001728.jpg +Places365_test_00001754.jpg +Places365_test_00001774.jpg +Places365_test_00001789.jpg +Places365_test_00001795.jpg +Places365_test_00001798.jpg +Places365_test_00001859.jpg +Places365_test_00001868.jpg +Places365_test_00001879.jpg +Places365_test_00001883.jpg +Places365_test_00001886.jpg +Places365_test_00001890.jpg +Places365_test_00001892.jpg +Places365_test_00001901.jpg +Places365_test_00001910.jpg +Places365_test_00001929.jpg +Places365_test_00001942.jpg +Places365_test_00001947.jpg +Places365_test_00001965.jpg +Places365_test_00001981.jpg +Places365_test_00001991.jpg +Places365_test_00002011.jpg +Places365_test_00002017.jpg +Places365_test_00002026.jpg +Places365_test_00002036.jpg +Places365_test_00002041.jpg +Places365_test_00002057.jpg +Places365_test_00002059.jpg +Places365_test_00002065.jpg +Places365_test_00002073.jpg +Places365_test_00002079.jpg +Places365_test_00002082.jpg +Places365_test_00002089.jpg +Places365_test_00002094.jpg +Places365_test_00002095.jpg +Places365_test_00002138.jpg +Places365_test_00002170.jpg +Places365_test_00002172.jpg +Places365_test_00002178.jpg +Places365_test_00002185.jpg +Places365_test_00002187.jpg +Places365_test_00002192.jpg +Places365_test_00002195.jpg +Places365_test_00002198.jpg +Places365_test_00002203.jpg +Places365_test_00002222.jpg +Places365_test_00002232.jpg +Places365_test_00002243.jpg +Places365_test_00002252.jpg +Places365_test_00002294.jpg 
+Places365_test_00002301.jpg +Places365_test_00002310.jpg +Places365_test_00002322.jpg +Places365_test_00002333.jpg +Places365_test_00002339.jpg +Places365_test_00002356.jpg +Places365_test_00002364.jpg +Places365_test_00002369.jpg +Places365_test_00002372.jpg +Places365_test_00002374.jpg +Places365_test_00002379.jpg +Places365_test_00002380.jpg +Places365_test_00002381.jpg +Places365_test_00002382.jpg +Places365_test_00002408.jpg +Places365_test_00002412.jpg +Places365_test_00002422.jpg +Places365_test_00002437.jpg +Places365_test_00002459.jpg +Places365_test_00002466.jpg +Places365_test_00002473.jpg +Places365_test_00002494.jpg +Places365_test_00002500.jpg +Places365_test_00002526.jpg +Places365_test_00002537.jpg +Places365_test_00002541.jpg +Places365_test_00002550.jpg +Places365_test_00002557.jpg +Places365_test_00002566.jpg +Places365_test_00002571.jpg +Places365_test_00002592.jpg +Places365_test_00002595.jpg +Places365_test_00002632.jpg +Places365_test_00002659.jpg +Places365_test_00002661.jpg +Places365_test_00002688.jpg +Places365_test_00002691.jpg +Places365_test_00002699.jpg +Places365_test_00002743.jpg +Places365_test_00002786.jpg +Places365_test_00002805.jpg +Places365_test_00002806.jpg +Places365_test_00002814.jpg +Places365_test_00002817.jpg +Places365_test_00002842.jpg +Places365_test_00002848.jpg +Places365_test_00002872.jpg +Places365_test_00002887.jpg +Places365_test_00002898.jpg +Places365_test_00002904.jpg +Places365_test_00002925.jpg +Places365_test_00002932.jpg +Places365_test_00002942.jpg +Places365_test_00002990.jpg +Places365_test_00002992.jpg +Places365_test_00003000.jpg +Places365_test_00003005.jpg +Places365_test_00003016.jpg +Places365_test_00003017.jpg +Places365_test_00003018.jpg +Places365_test_00003026.jpg +Places365_test_00003027.jpg +Places365_test_00003032.jpg +Places365_test_00003038.jpg +Places365_test_00003050.jpg +Places365_test_00003063.jpg +Places365_test_00003076.jpg +Places365_test_00003084.jpg +Places365_test_00003088.jpg +Places365_test_00003091.jpg +Places365_test_00003105.jpg +Places365_test_00003113.jpg +Places365_test_00003125.jpg +Places365_test_00003126.jpg +Places365_test_00003144.jpg +Places365_test_00003156.jpg +Places365_test_00003161.jpg +Places365_test_00003164.jpg +Places365_test_00003166.jpg +Places365_test_00003167.jpg +Places365_test_00003181.jpg +Places365_test_00003211.jpg +Places365_test_00003216.jpg +Places365_test_00003221.jpg +Places365_test_00003233.jpg +Places365_test_00003236.jpg +Places365_test_00003237.jpg +Places365_test_00003246.jpg +Places365_test_00003248.jpg +Places365_test_00003251.jpg +Places365_test_00003257.jpg +Places365_test_00003272.jpg +Places365_test_00003287.jpg +Places365_test_00003316.jpg +Places365_test_00003337.jpg +Places365_test_00003338.jpg +Places365_test_00003350.jpg +Places365_test_00003373.jpg +Places365_test_00003393.jpg +Places365_test_00003406.jpg +Places365_test_00003411.jpg +Places365_test_00003412.jpg +Places365_test_00003416.jpg +Places365_test_00003426.jpg +Places365_test_00003427.jpg +Places365_test_00003453.jpg +Places365_test_00003484.jpg +Places365_test_00003487.jpg +Places365_test_00003491.jpg +Places365_test_00003545.jpg +Places365_test_00003555.jpg +Places365_test_00003567.jpg +Places365_test_00003575.jpg +Places365_test_00003582.jpg +Places365_test_00003595.jpg +Places365_test_00003609.jpg +Places365_test_00003613.jpg +Places365_test_00003620.jpg +Places365_test_00003635.jpg +Places365_test_00003647.jpg +Places365_test_00003650.jpg +Places365_test_00003665.jpg 
+Places365_test_00003672.jpg +Places365_test_00003686.jpg +Places365_test_00003720.jpg +Places365_test_00003722.jpg +Places365_test_00003731.jpg +Places365_test_00003732.jpg +Places365_test_00003748.jpg +Places365_test_00003770.jpg +Places365_test_00003773.jpg +Places365_test_00003778.jpg +Places365_test_00003786.jpg +Places365_test_00003796.jpg +Places365_test_00003804.jpg +Places365_test_00003823.jpg +Places365_test_00003842.jpg +Places365_test_00003857.jpg +Places365_test_00003860.jpg +Places365_test_00003901.jpg +Places365_test_00003941.jpg +Places365_test_00003942.jpg +Places365_test_00003967.jpg +Places365_test_00003968.jpg +Places365_test_00003991.jpg +Places365_test_00004012.jpg +Places365_test_00004036.jpg +Places365_test_00004047.jpg +Places365_test_00004056.jpg +Places365_test_00004073.jpg +Places365_test_00004081.jpg +Places365_test_00004116.jpg +Places365_test_00004119.jpg +Places365_test_00004142.jpg +Places365_test_00004146.jpg +Places365_test_00004151.jpg +Places365_test_00004160.jpg +Places365_test_00004163.jpg +Places365_test_00004166.jpg +Places365_test_00004173.jpg +Places365_test_00004176.jpg +Places365_test_00004194.jpg +Places365_test_00004220.jpg +Places365_test_00004221.jpg +Places365_test_00004225.jpg +Places365_test_00004226.jpg +Places365_test_00004249.jpg +Places365_test_00004256.jpg +Places365_test_00004268.jpg +Places365_test_00004284.jpg +Places365_test_00004286.jpg +Places365_test_00004292.jpg +Places365_test_00004293.jpg +Places365_test_00004314.jpg +Places365_test_00004318.jpg +Places365_test_00004342.jpg +Places365_test_00004358.jpg +Places365_test_00004367.jpg +Places365_test_00004381.jpg +Places365_test_00004385.jpg +Places365_test_00004392.jpg +Places365_test_00004395.jpg +Places365_test_00004410.jpg +Places365_test_00004474.jpg +Places365_test_00004508.jpg +Places365_test_00004514.jpg +Places365_test_00004552.jpg +Places365_test_00004557.jpg +Places365_test_00004559.jpg +Places365_test_00004570.jpg +Places365_test_00004589.jpg +Places365_test_00004601.jpg +Places365_test_00004617.jpg +Places365_test_00004619.jpg +Places365_test_00004626.jpg +Places365_test_00004637.jpg +Places365_test_00004647.jpg +Places365_test_00004648.jpg +Places365_test_00004674.jpg +Places365_test_00004680.jpg +Places365_test_00004697.jpg +Places365_test_00004702.jpg +Places365_test_00004719.jpg +Places365_test_00004726.jpg +Places365_test_00004732.jpg +Places365_test_00004742.jpg +Places365_test_00004751.jpg +Places365_test_00004753.jpg +Places365_test_00004755.jpg +Places365_test_00004762.jpg +Places365_test_00004766.jpg +Places365_test_00004774.jpg +Places365_test_00004780.jpg +Places365_test_00004807.jpg +Places365_test_00004808.jpg +Places365_test_00004811.jpg +Places365_test_00004812.jpg +Places365_test_00004817.jpg +Places365_test_00004824.jpg +Places365_test_00004841.jpg +Places365_test_00004856.jpg +Places365_test_00004858.jpg +Places365_test_00004863.jpg +Places365_test_00004874.jpg +Places365_test_00004879.jpg +Places365_test_00004880.jpg +Places365_test_00004899.jpg +Places365_test_00004900.jpg +Places365_test_00004903.jpg +Places365_test_00004933.jpg +Places365_test_00004935.jpg +Places365_test_00004944.jpg +Places365_test_00004957.jpg +Places365_test_00004969.jpg +Places365_test_00004983.jpg +Places365_test_00004991.jpg +Places365_test_00005011.jpg +Places365_test_00005012.jpg +Places365_test_00005015.jpg +Places365_test_00005032.jpg +Places365_test_00005065.jpg +Places365_test_00005067.jpg +Places365_test_00005085.jpg +Places365_test_00005100.jpg 
+Places365_test_00005106.jpg +Places365_test_00005121.jpg +Places365_test_00005158.jpg +Places365_test_00005162.jpg +Places365_test_00005166.jpg +Places365_test_00005170.jpg +Places365_test_00005194.jpg +Places365_test_00005195.jpg +Places365_test_00005206.jpg +Places365_test_00005208.jpg +Places365_test_00005218.jpg +Places365_test_00005220.jpg +Places365_test_00005238.jpg +Places365_test_00005260.jpg +Places365_test_00005289.jpg +Places365_test_00005296.jpg +Places365_test_00005298.jpg +Places365_test_00005310.jpg +Places365_test_00005325.jpg +Places365_test_00005343.jpg +Places365_test_00005361.jpg +Places365_test_00005375.jpg +Places365_test_00005419.jpg +Places365_test_00005427.jpg +Places365_test_00005439.jpg +Places365_test_00005449.jpg +Places365_test_00005467.jpg +Places365_test_00005475.jpg +Places365_test_00005489.jpg +Places365_test_00005493.jpg +Places365_test_00005507.jpg +Places365_test_00005526.jpg +Places365_test_00005538.jpg +Places365_test_00005542.jpg +Places365_test_00005547.jpg +Places365_test_00005578.jpg +Places365_test_00005586.jpg +Places365_test_00005620.jpg +Places365_test_00005629.jpg +Places365_test_00005640.jpg +Places365_test_00005643.jpg +Places365_test_00005662.jpg +Places365_test_00005669.jpg +Places365_test_00005682.jpg +Places365_test_00005723.jpg +Places365_test_00005726.jpg +Places365_test_00005732.jpg +Places365_test_00005764.jpg +Places365_test_00005775.jpg +Places365_test_00005820.jpg +Places365_test_00005827.jpg +Places365_test_00005843.jpg +Places365_test_00005844.jpg +Places365_test_00005860.jpg +Places365_test_00005868.jpg +Places365_test_00005876.jpg +Places365_test_00005895.jpg +Places365_test_00005897.jpg +Places365_test_00005918.jpg +Places365_test_00005931.jpg +Places365_test_00005934.jpg +Places365_test_00005962.jpg +Places365_test_00005971.jpg +Places365_test_00006003.jpg +Places365_test_00006049.jpg +Places365_test_00006060.jpg +Places365_test_00006068.jpg +Places365_test_00006070.jpg +Places365_test_00006078.jpg +Places365_test_00006090.jpg +Places365_test_00006106.jpg +Places365_test_00006124.jpg +Places365_test_00006141.jpg +Places365_test_00006154.jpg +Places365_test_00006160.jpg +Places365_test_00006165.jpg +Places365_test_00006172.jpg +Places365_test_00006199.jpg +Places365_test_00006260.jpg +Places365_test_00006266.jpg +Places365_test_00006271.jpg +Places365_test_00006272.jpg +Places365_test_00006284.jpg +Places365_test_00006285.jpg +Places365_test_00006291.jpg +Places365_test_00006300.jpg +Places365_test_00006305.jpg +Places365_test_00006326.jpg +Places365_test_00006353.jpg +Places365_test_00006356.jpg +Places365_test_00006359.jpg +Places365_test_00006385.jpg +Places365_test_00006387.jpg +Places365_test_00006405.jpg +Places365_test_00006409.jpg +Places365_test_00006420.jpg +Places365_test_00006425.jpg +Places365_test_00006428.jpg +Places365_test_00006434.jpg +Places365_test_00006439.jpg +Places365_test_00006452.jpg +Places365_test_00006457.jpg +Places365_test_00006460.jpg +Places365_test_00006466.jpg +Places365_test_00006503.jpg +Places365_test_00006510.jpg +Places365_test_00006519.jpg +Places365_test_00006526.jpg +Places365_test_00006531.jpg +Places365_test_00006545.jpg +Places365_test_00006550.jpg +Places365_test_00006551.jpg +Places365_test_00006558.jpg +Places365_test_00006565.jpg +Places365_test_00006575.jpg +Places365_test_00006578.jpg +Places365_test_00006579.jpg +Places365_test_00006584.jpg +Places365_test_00006599.jpg +Places365_test_00006609.jpg +Places365_test_00006616.jpg +Places365_test_00006642.jpg 
+Places365_test_00006691.jpg +Places365_test_00006696.jpg +Places365_test_00006698.jpg +Places365_test_00006704.jpg +Places365_test_00006717.jpg +Places365_test_00006719.jpg +Places365_test_00006727.jpg +Places365_test_00006731.jpg +Places365_test_00006741.jpg +Places365_test_00006750.jpg +Places365_test_00006751.jpg +Places365_test_00006755.jpg +Places365_test_00006759.jpg +Places365_test_00006761.jpg +Places365_test_00006778.jpg +Places365_test_00006783.jpg +Places365_test_00006813.jpg +Places365_test_00006814.jpg +Places365_test_00006844.jpg +Places365_test_00006845.jpg +Places365_test_00006847.jpg +Places365_test_00006853.jpg +Places365_test_00006854.jpg +Places365_test_00006855.jpg +Places365_test_00006866.jpg +Places365_test_00006869.jpg +Places365_test_00006886.jpg +Places365_test_00006891.jpg +Places365_test_00006893.jpg +Places365_test_00006899.jpg +Places365_test_00006907.jpg +Places365_test_00006908.jpg +Places365_test_00006921.jpg +Places365_test_00006926.jpg +Places365_test_00006935.jpg +Places365_test_00006940.jpg +Places365_test_00006946.jpg +Places365_test_00006995.jpg +Places365_test_00007014.jpg +Places365_test_00007025.jpg +Places365_test_00007030.jpg +Places365_test_00007036.jpg +Places365_test_00007040.jpg +Places365_test_00007042.jpg +Places365_test_00007053.jpg +Places365_test_00007058.jpg +Places365_test_00007072.jpg +Places365_test_00007077.jpg +Places365_test_00007099.jpg +Places365_test_00007103.jpg +Places365_test_00007104.jpg +Places365_test_00007108.jpg +Places365_test_00007110.jpg +Places365_test_00007112.jpg +Places365_test_00007117.jpg +Places365_test_00007123.jpg +Places365_test_00007129.jpg +Places365_test_00007146.jpg +Places365_test_00007149.jpg +Places365_test_00007179.jpg +Places365_test_00007213.jpg +Places365_test_00007215.jpg +Places365_test_00007220.jpg +Places365_test_00007222.jpg +Places365_test_00007255.jpg +Places365_test_00007259.jpg +Places365_test_00007262.jpg +Places365_test_00007283.jpg +Places365_test_00007291.jpg +Places365_test_00007293.jpg +Places365_test_00007309.jpg +Places365_test_00007333.jpg +Places365_test_00007343.jpg +Places365_test_00007353.jpg +Places365_test_00007373.jpg +Places365_test_00007387.jpg +Places365_test_00007389.jpg +Places365_test_00007395.jpg +Places365_test_00007396.jpg +Places365_test_00007404.jpg +Places365_test_00007405.jpg +Places365_test_00007408.jpg +Places365_test_00007427.jpg +Places365_test_00007441.jpg +Places365_test_00007463.jpg +Places365_test_00007467.jpg +Places365_test_00007477.jpg +Places365_test_00007480.jpg +Places365_test_00007513.jpg +Places365_test_00007514.jpg +Places365_test_00007533.jpg +Places365_test_00007536.jpg +Places365_test_00007556.jpg +Places365_test_00007571.jpg +Places365_test_00007572.jpg +Places365_test_00007594.jpg +Places365_test_00007608.jpg +Places365_test_00007617.jpg +Places365_test_00007619.jpg +Places365_test_00007620.jpg +Places365_test_00007623.jpg +Places365_test_00007624.jpg +Places365_test_00007625.jpg +Places365_test_00007644.jpg +Places365_test_00007646.jpg +Places365_test_00007679.jpg +Places365_test_00007688.jpg +Places365_test_00007695.jpg +Places365_test_00007710.jpg +Places365_test_00007724.jpg +Places365_test_00007730.jpg +Places365_test_00007746.jpg +Places365_test_00007753.jpg +Places365_test_00007762.jpg +Places365_test_00007782.jpg +Places365_test_00007794.jpg +Places365_test_00007802.jpg +Places365_test_00007803.jpg +Places365_test_00007809.jpg +Places365_test_00007825.jpg +Places365_test_00007831.jpg +Places365_test_00007834.jpg 
+Places365_test_00007842.jpg +Places365_test_00007868.jpg +Places365_test_00007871.jpg +Places365_test_00007880.jpg +Places365_test_00007896.jpg +Places365_test_00007914.jpg +Places365_test_00007915.jpg +Places365_test_00007920.jpg +Places365_test_00007931.jpg +Places365_test_00007945.jpg +Places365_test_00007949.jpg +Places365_test_00007964.jpg +Places365_test_00007976.jpg +Places365_test_00007996.jpg +Places365_test_00008005.jpg +Places365_test_00008016.jpg +Places365_test_00008019.jpg +Places365_test_00008023.jpg +Places365_test_00008066.jpg +Places365_test_00008070.jpg +Places365_test_00008101.jpg +Places365_test_00008103.jpg +Places365_test_00008107.jpg +Places365_test_00008124.jpg +Places365_test_00008130.jpg +Places365_test_00008185.jpg +Places365_test_00008209.jpg +Places365_test_00008248.jpg +Places365_test_00008256.jpg +Places365_test_00008274.jpg +Places365_test_00008291.jpg +Places365_test_00008304.jpg +Places365_test_00008306.jpg +Places365_test_00008319.jpg +Places365_test_00008322.jpg +Places365_test_00008353.jpg +Places365_test_00008359.jpg +Places365_test_00008363.jpg +Places365_test_00008374.jpg +Places365_test_00008377.jpg +Places365_test_00008384.jpg +Places365_test_00008391.jpg +Places365_test_00008405.jpg +Places365_test_00008414.jpg +Places365_test_00008419.jpg +Places365_test_00008425.jpg +Places365_test_00008431.jpg +Places365_test_00008436.jpg +Places365_test_00008461.jpg +Places365_test_00008465.jpg +Places365_test_00008479.jpg +Places365_test_00008482.jpg +Places365_test_00008487.jpg +Places365_test_00008493.jpg +Places365_test_00008497.jpg +Places365_test_00008501.jpg +Places365_test_00008504.jpg +Places365_test_00008520.jpg +Places365_test_00008522.jpg +Places365_test_00008530.jpg +Places365_test_00008553.jpg +Places365_test_00008557.jpg +Places365_test_00008569.jpg +Places365_test_00008588.jpg +Places365_test_00008589.jpg +Places365_test_00008590.jpg +Places365_test_00008610.jpg +Places365_test_00008611.jpg +Places365_test_00008617.jpg +Places365_test_00008630.jpg +Places365_test_00008639.jpg +Places365_test_00008649.jpg +Places365_test_00008654.jpg +Places365_test_00008676.jpg +Places365_test_00008685.jpg +Places365_test_00008693.jpg +Places365_test_00008716.jpg +Places365_test_00008744.jpg +Places365_test_00008750.jpg +Places365_test_00008754.jpg +Places365_test_00008761.jpg +Places365_test_00008766.jpg +Places365_test_00008776.jpg +Places365_test_00008777.jpg +Places365_test_00008790.jpg +Places365_test_00008791.jpg +Places365_test_00008800.jpg +Places365_test_00008845.jpg +Places365_test_00008852.jpg +Places365_test_00008883.jpg +Places365_test_00008887.jpg +Places365_test_00008917.jpg +Places365_test_00008934.jpg +Places365_test_00008946.jpg +Places365_test_00008960.jpg +Places365_test_00008973.jpg +Places365_test_00009005.jpg +Places365_test_00009009.jpg +Places365_test_00009034.jpg +Places365_test_00009041.jpg +Places365_test_00009050.jpg +Places365_test_00009055.jpg +Places365_test_00009063.jpg +Places365_test_00009072.jpg +Places365_test_00009073.jpg +Places365_test_00009076.jpg +Places365_test_00009103.jpg +Places365_test_00009106.jpg +Places365_test_00009133.jpg +Places365_test_00009134.jpg +Places365_test_00009145.jpg +Places365_test_00009150.jpg +Places365_test_00009163.jpg +Places365_test_00009178.jpg +Places365_test_00009185.jpg +Places365_test_00009191.jpg +Places365_test_00009209.jpg +Places365_test_00009218.jpg +Places365_test_00009219.jpg +Places365_test_00009230.jpg +Places365_test_00009235.jpg +Places365_test_00009245.jpg 
+Places365_test_00009256.jpg +Places365_test_00009262.jpg +Places365_test_00009285.jpg +Places365_test_00009290.jpg +Places365_test_00009296.jpg +Places365_test_00009297.jpg +Places365_test_00009304.jpg +Places365_test_00009320.jpg +Places365_test_00009357.jpg +Places365_test_00009399.jpg +Places365_test_00009400.jpg +Places365_test_00009408.jpg +Places365_test_00009412.jpg +Places365_test_00009429.jpg +Places365_test_00009436.jpg +Places365_test_00009444.jpg +Places365_test_00009450.jpg +Places365_test_00009451.jpg +Places365_test_00009472.jpg +Places365_test_00009487.jpg +Places365_test_00009494.jpg +Places365_test_00009500.jpg +Places365_test_00009502.jpg +Places365_test_00009510.jpg +Places365_test_00009536.jpg +Places365_test_00009539.jpg +Places365_test_00009545.jpg +Places365_test_00009546.jpg +Places365_test_00009551.jpg +Places365_test_00009561.jpg +Places365_test_00009562.jpg +Places365_test_00009563.jpg +Places365_test_00009577.jpg +Places365_test_00009584.jpg +Places365_test_00009602.jpg +Places365_test_00009658.jpg +Places365_test_00009660.jpg +Places365_test_00009665.jpg +Places365_test_00009684.jpg +Places365_test_00009689.jpg +Places365_test_00009700.jpg +Places365_test_00009706.jpg +Places365_test_00009707.jpg +Places365_test_00009715.jpg +Places365_test_00009743.jpg +Places365_test_00009761.jpg +Places365_test_00009775.jpg +Places365_test_00009776.jpg +Places365_test_00009791.jpg +Places365_test_00009794.jpg +Places365_test_00009811.jpg +Places365_test_00009824.jpg +Places365_test_00009835.jpg +Places365_test_00009845.jpg +Places365_test_00009846.jpg +Places365_test_00009848.jpg +Places365_test_00009861.jpg +Places365_test_00009871.jpg +Places365_test_00009874.jpg +Places365_test_00009893.jpg +Places365_test_00009896.jpg +Places365_test_00009905.jpg +Places365_test_00009906.jpg +Places365_test_00009912.jpg +Places365_test_00009915.jpg +Places365_test_00009920.jpg +Places365_test_00009927.jpg +Places365_test_00009928.jpg +Places365_test_00009930.jpg +Places365_test_00009934.jpg +Places365_test_00009941.jpg +Places365_test_00009952.jpg +Places365_test_00009956.jpg +Places365_test_00009976.jpg +Places365_test_00009979.jpg +Places365_test_00009981.jpg +Places365_test_00009994.jpg +Places365_test_00009998.jpg +Places365_test_00010002.jpg +Places365_test_00010006.jpg +Places365_test_00010007.jpg +Places365_test_00010014.jpg +Places365_test_00010053.jpg +Places365_test_00010060.jpg +Places365_test_00010062.jpg +Places365_test_00010067.jpg +Places365_test_00010081.jpg +Places365_test_00010084.jpg +Places365_test_00010099.jpg +Places365_test_00010105.jpg +Places365_test_00010110.jpg +Places365_test_00010112.jpg +Places365_test_00010151.jpg +Places365_test_00010176.jpg +Places365_test_00010181.jpg +Places365_test_00010196.jpg +Places365_test_00010203.jpg +Places365_test_00010231.jpg +Places365_test_00010236.jpg +Places365_test_00010241.jpg +Places365_test_00010250.jpg +Places365_test_00010266.jpg +Places365_test_00010280.jpg +Places365_test_00010291.jpg +Places365_test_00010293.jpg +Places365_test_00010304.jpg +Places365_test_00010319.jpg +Places365_test_00010331.jpg +Places365_test_00010342.jpg +Places365_test_00010352.jpg +Places365_test_00010363.jpg +Places365_test_00010376.jpg +Places365_test_00010381.jpg +Places365_test_00010402.jpg +Places365_test_00010404.jpg +Places365_test_00010415.jpg +Places365_test_00010434.jpg +Places365_test_00010450.jpg +Places365_test_00010455.jpg +Places365_test_00010462.jpg +Places365_test_00010489.jpg +Places365_test_00010521.jpg 
+Places365_test_00010556.jpg +Places365_test_00010567.jpg +Places365_test_00010578.jpg +Places365_test_00010587.jpg +Places365_test_00010598.jpg +Places365_test_00010623.jpg +Places365_test_00010624.jpg +Places365_test_00010627.jpg +Places365_test_00010634.jpg +Places365_test_00010638.jpg +Places365_test_00010640.jpg +Places365_test_00010643.jpg +Places365_test_00010678.jpg +Places365_test_00010682.jpg +Places365_test_00010689.jpg +Places365_test_00010692.jpg +Places365_test_00010707.jpg +Places365_test_00010726.jpg +Places365_test_00010750.jpg +Places365_test_00010752.jpg +Places365_test_00010774.jpg +Places365_test_00010781.jpg +Places365_test_00010802.jpg +Places365_test_00010807.jpg +Places365_test_00010816.jpg +Places365_test_00010825.jpg +Places365_test_00010830.jpg +Places365_test_00010841.jpg +Places365_test_00010867.jpg +Places365_test_00010874.jpg +Places365_test_00010876.jpg +Places365_test_00010881.jpg +Places365_test_00010888.jpg +Places365_test_00010895.jpg +Places365_test_00010911.jpg +Places365_test_00010921.jpg +Places365_test_00010959.jpg +Places365_test_00010971.jpg +Places365_test_00010989.jpg +Places365_test_00011016.jpg +Places365_test_00011017.jpg +Places365_test_00011044.jpg +Places365_test_00011076.jpg +Places365_test_00011090.jpg +Places365_test_00011101.jpg +Places365_test_00011107.jpg +Places365_test_00011128.jpg +Places365_test_00011134.jpg +Places365_test_00011146.jpg +Places365_test_00011152.jpg +Places365_test_00011170.jpg +Places365_test_00011183.jpg +Places365_test_00011202.jpg +Places365_test_00011206.jpg +Places365_test_00011211.jpg +Places365_test_00011213.jpg +Places365_test_00011214.jpg +Places365_test_00011215.jpg +Places365_test_00011240.jpg +Places365_test_00011260.jpg +Places365_test_00011262.jpg +Places365_test_00011273.jpg +Places365_test_00011277.jpg +Places365_test_00011280.jpg +Places365_test_00011282.jpg +Places365_test_00011284.jpg +Places365_test_00011295.jpg +Places365_test_00011300.jpg +Places365_test_00011310.jpg +Places365_test_00011312.jpg +Places365_test_00011313.jpg +Places365_test_00011330.jpg +Places365_test_00011332.jpg +Places365_test_00011352.jpg +Places365_test_00011358.jpg +Places365_test_00011368.jpg +Places365_test_00011377.jpg +Places365_test_00011418.jpg +Places365_test_00011456.jpg +Places365_test_00011457.jpg +Places365_test_00011477.jpg +Places365_test_00011480.jpg +Places365_test_00011495.jpg +Places365_test_00011508.jpg +Places365_test_00011515.jpg +Places365_test_00011534.jpg +Places365_test_00011545.jpg +Places365_test_00011560.jpg +Places365_test_00011584.jpg +Places365_test_00011591.jpg +Places365_test_00011619.jpg +Places365_test_00011623.jpg +Places365_test_00011626.jpg +Places365_test_00011649.jpg +Places365_test_00011669.jpg +Places365_test_00011674.jpg +Places365_test_00011686.jpg +Places365_test_00011690.jpg +Places365_test_00011707.jpg +Places365_test_00011718.jpg +Places365_test_00011719.jpg +Places365_test_00011742.jpg +Places365_test_00011747.jpg +Places365_test_00011759.jpg +Places365_test_00011774.jpg +Places365_test_00011790.jpg +Places365_test_00011801.jpg +Places365_test_00011824.jpg +Places365_test_00011826.jpg +Places365_test_00011848.jpg +Places365_test_00011862.jpg +Places365_test_00011869.jpg +Places365_test_00011870.jpg +Places365_test_00011871.jpg +Places365_test_00011873.jpg +Places365_test_00011877.jpg +Places365_test_00011887.jpg +Places365_test_00011896.jpg +Places365_test_00011899.jpg +Places365_test_00011900.jpg +Places365_test_00011903.jpg +Places365_test_00011925.jpg 
+Places365_test_00011939.jpg +Places365_test_00011943.jpg +Places365_test_00011954.jpg +Places365_test_00011958.jpg +Places365_test_00011960.jpg +Places365_test_00011963.jpg +Places365_test_00012001.jpg +Places365_test_00012008.jpg +Places365_test_00012010.jpg +Places365_test_00012022.jpg +Places365_test_00012046.jpg +Places365_test_00012051.jpg +Places365_test_00012075.jpg +Places365_test_00012076.jpg +Places365_test_00012084.jpg +Places365_test_00012100.jpg +Places365_test_00012127.jpg +Places365_test_00012133.jpg +Places365_test_00012135.jpg +Places365_test_00012141.jpg +Places365_test_00012186.jpg +Places365_test_00012200.jpg +Places365_test_00012218.jpg +Places365_test_00012224.jpg +Places365_test_00012230.jpg +Places365_test_00012254.jpg +Places365_test_00012265.jpg +Places365_test_00012269.jpg +Places365_test_00012270.jpg +Places365_test_00012279.jpg +Places365_test_00012317.jpg +Places365_test_00012320.jpg +Places365_test_00012338.jpg +Places365_test_00012342.jpg +Places365_test_00012364.jpg +Places365_test_00012370.jpg +Places365_test_00012384.jpg +Places365_test_00012392.jpg +Places365_test_00012397.jpg +Places365_test_00012402.jpg +Places365_test_00012415.jpg +Places365_test_00012425.jpg +Places365_test_00012440.jpg +Places365_test_00012441.jpg +Places365_test_00012452.jpg +Places365_test_00012456.jpg +Places365_test_00012470.jpg +Places365_test_00012473.jpg +Places365_test_00012475.jpg +Places365_test_00012480.jpg +Places365_test_00012487.jpg +Places365_test_00012491.jpg +Places365_test_00012501.jpg +Places365_test_00012510.jpg +Places365_test_00012519.jpg +Places365_test_00012543.jpg +Places365_test_00012552.jpg +Places365_test_00012553.jpg +Places365_test_00012557.jpg +Places365_test_00012563.jpg +Places365_test_00012564.jpg +Places365_test_00012585.jpg +Places365_test_00012587.jpg +Places365_test_00012614.jpg +Places365_test_00012615.jpg +Places365_test_00012616.jpg +Places365_test_00012622.jpg +Places365_test_00012636.jpg +Places365_test_00012640.jpg +Places365_test_00012644.jpg +Places365_test_00012672.jpg +Places365_test_00012681.jpg +Places365_test_00012723.jpg +Places365_test_00012730.jpg +Places365_test_00012745.jpg +Places365_test_00012780.jpg +Places365_test_00012791.jpg +Places365_test_00012792.jpg +Places365_test_00012799.jpg +Places365_test_00012801.jpg +Places365_test_00012832.jpg +Places365_test_00012838.jpg +Places365_test_00012842.jpg +Places365_test_00012901.jpg +Places365_test_00012905.jpg +Places365_test_00012913.jpg +Places365_test_00012922.jpg +Places365_test_00012926.jpg +Places365_test_00012927.jpg +Places365_test_00012946.jpg +Places365_test_00012981.jpg +Places365_test_00012985.jpg +Places365_test_00012989.jpg +Places365_test_00013005.jpg +Places365_test_00013007.jpg +Places365_test_00013018.jpg +Places365_test_00013035.jpg +Places365_test_00013054.jpg +Places365_test_00013070.jpg +Places365_test_00013073.jpg +Places365_test_00013104.jpg +Places365_test_00013109.jpg +Places365_test_00013115.jpg +Places365_test_00013124.jpg +Places365_test_00013128.jpg +Places365_test_00013130.jpg +Places365_test_00013144.jpg +Places365_test_00013151.jpg +Places365_test_00013157.jpg +Places365_test_00013163.jpg +Places365_test_00013189.jpg +Places365_test_00013196.jpg +Places365_test_00013209.jpg +Places365_test_00013213.jpg +Places365_test_00013218.jpg +Places365_test_00013244.jpg +Places365_test_00013245.jpg +Places365_test_00013248.jpg +Places365_test_00013250.jpg +Places365_test_00013256.jpg +Places365_test_00013264.jpg +Places365_test_00013265.jpg 
+Places365_test_00013269.jpg +Places365_test_00013271.jpg +Places365_test_00013280.jpg +Places365_test_00013328.jpg +Places365_test_00013359.jpg +Places365_test_00013369.jpg +Places365_test_00013376.jpg +Places365_test_00013378.jpg +Places365_test_00013389.jpg +Places365_test_00013398.jpg +Places365_test_00013403.jpg +Places365_test_00013410.jpg +Places365_test_00013417.jpg +Places365_test_00013439.jpg +Places365_test_00013440.jpg +Places365_test_00013457.jpg +Places365_test_00013467.jpg +Places365_test_00013485.jpg +Places365_test_00013491.jpg +Places365_test_00013501.jpg +Places365_test_00013524.jpg +Places365_test_00013525.jpg +Places365_test_00013557.jpg +Places365_test_00013563.jpg +Places365_test_00013574.jpg +Places365_test_00013581.jpg +Places365_test_00013594.jpg +Places365_test_00013611.jpg +Places365_test_00013619.jpg +Places365_test_00013624.jpg +Places365_test_00013648.jpg +Places365_test_00013655.jpg +Places365_test_00013658.jpg +Places365_test_00013663.jpg +Places365_test_00013666.jpg +Places365_test_00013669.jpg +Places365_test_00013674.jpg +Places365_test_00013679.jpg +Places365_test_00013692.jpg +Places365_test_00013701.jpg +Places365_test_00013726.jpg +Places365_test_00013730.jpg +Places365_test_00013748.jpg +Places365_test_00013757.jpg +Places365_test_00013782.jpg +Places365_test_00013786.jpg +Places365_test_00013795.jpg +Places365_test_00013813.jpg +Places365_test_00013825.jpg +Places365_test_00013833.jpg +Places365_test_00013837.jpg +Places365_test_00013926.jpg +Places365_test_00013934.jpg +Places365_test_00013975.jpg +Places365_test_00014012.jpg +Places365_test_00014014.jpg +Places365_test_00014021.jpg +Places365_test_00014029.jpg +Places365_test_00014039.jpg +Places365_test_00014047.jpg +Places365_test_00014048.jpg +Places365_test_00014052.jpg +Places365_test_00014053.jpg +Places365_test_00014055.jpg +Places365_test_00014060.jpg +Places365_test_00014077.jpg +Places365_test_00014081.jpg +Places365_test_00014086.jpg +Places365_test_00014087.jpg +Places365_test_00014111.jpg +Places365_test_00014114.jpg +Places365_test_00014115.jpg +Places365_test_00014118.jpg +Places365_test_00014124.jpg +Places365_test_00014162.jpg +Places365_test_00014177.jpg +Places365_test_00014195.jpg +Places365_test_00014201.jpg +Places365_test_00014203.jpg +Places365_test_00014204.jpg +Places365_test_00014206.jpg +Places365_test_00014211.jpg +Places365_test_00014215.jpg +Places365_test_00014216.jpg +Places365_test_00014271.jpg +Places365_test_00014278.jpg +Places365_test_00014291.jpg +Places365_test_00014299.jpg +Places365_test_00014300.jpg +Places365_test_00014314.jpg +Places365_test_00014318.jpg +Places365_test_00014320.jpg +Places365_test_00014332.jpg +Places365_test_00014338.jpg +Places365_test_00014350.jpg +Places365_test_00014364.jpg +Places365_test_00014380.jpg +Places365_test_00014381.jpg +Places365_test_00014387.jpg +Places365_test_00014401.jpg +Places365_test_00014407.jpg +Places365_test_00014414.jpg +Places365_test_00014437.jpg +Places365_test_00014453.jpg +Places365_test_00014458.jpg +Places365_test_00014462.jpg +Places365_test_00014471.jpg +Places365_test_00014486.jpg +Places365_test_00014488.jpg +Places365_test_00014505.jpg +Places365_test_00014510.jpg +Places365_test_00014511.jpg +Places365_test_00014526.jpg +Places365_test_00014536.jpg +Places365_test_00014542.jpg +Places365_test_00014567.jpg +Places365_test_00014568.jpg +Places365_test_00014576.jpg +Places365_test_00014607.jpg +Places365_test_00014610.jpg +Places365_test_00014615.jpg +Places365_test_00014626.jpg 
+Places365_test_00014632.jpg +Places365_test_00014639.jpg +Places365_test_00014643.jpg +Places365_test_00014648.jpg +Places365_test_00014652.jpg +Places365_test_00014662.jpg +Places365_test_00014685.jpg +Places365_test_00014686.jpg +Places365_test_00014705.jpg +Places365_test_00014714.jpg +Places365_test_00014715.jpg +Places365_test_00014716.jpg +Places365_test_00014749.jpg +Places365_test_00014757.jpg +Places365_test_00014764.jpg +Places365_test_00014798.jpg +Places365_test_00014825.jpg +Places365_test_00014838.jpg +Places365_test_00014842.jpg +Places365_test_00014846.jpg +Places365_test_00014853.jpg +Places365_test_00014859.jpg +Places365_test_00014861.jpg +Places365_test_00014873.jpg +Places365_test_00014879.jpg +Places365_test_00014884.jpg +Places365_test_00014906.jpg +Places365_test_00014907.jpg +Places365_test_00014916.jpg +Places365_test_00014934.jpg +Places365_test_00014961.jpg +Places365_test_00014963.jpg +Places365_test_00015007.jpg +Places365_test_00015008.jpg +Places365_test_00015013.jpg +Places365_test_00015046.jpg +Places365_test_00015087.jpg +Places365_test_00015100.jpg +Places365_test_00015107.jpg +Places365_test_00015109.jpg +Places365_test_00015111.jpg +Places365_test_00015139.jpg +Places365_test_00015148.jpg +Places365_test_00015155.jpg +Places365_test_00015173.jpg +Places365_test_00015184.jpg +Places365_test_00015187.jpg +Places365_test_00015189.jpg +Places365_test_00015193.jpg +Places365_test_00015201.jpg +Places365_test_00015214.jpg +Places365_test_00015226.jpg +Places365_test_00015243.jpg +Places365_test_00015291.jpg +Places365_test_00015302.jpg +Places365_test_00015335.jpg +Places365_test_00015339.jpg +Places365_test_00015340.jpg +Places365_test_00015362.jpg +Places365_test_00015371.jpg +Places365_test_00015373.jpg +Places365_test_00015374.jpg +Places365_test_00015415.jpg +Places365_test_00015418.jpg +Places365_test_00015425.jpg +Places365_test_00015442.jpg +Places365_test_00015450.jpg +Places365_test_00015465.jpg +Places365_test_00015476.jpg +Places365_test_00015497.jpg +Places365_test_00015560.jpg +Places365_test_00015565.jpg +Places365_test_00015574.jpg +Places365_test_00015577.jpg +Places365_test_00015578.jpg +Places365_test_00015586.jpg +Places365_test_00015588.jpg +Places365_test_00015595.jpg +Places365_test_00015633.jpg +Places365_test_00015640.jpg +Places365_test_00015650.jpg +Places365_test_00015651.jpg +Places365_test_00015691.jpg +Places365_test_00015700.jpg +Places365_test_00015704.jpg +Places365_test_00015712.jpg +Places365_test_00015723.jpg +Places365_test_00015740.jpg +Places365_test_00015772.jpg +Places365_test_00015780.jpg +Places365_test_00015792.jpg +Places365_test_00015802.jpg +Places365_test_00015803.jpg +Places365_test_00015812.jpg +Places365_test_00015813.jpg +Places365_test_00015826.jpg +Places365_test_00015836.jpg +Places365_test_00015839.jpg +Places365_test_00015842.jpg +Places365_test_00015847.jpg +Places365_test_00015854.jpg +Places365_test_00015858.jpg +Places365_test_00015869.jpg +Places365_test_00015872.jpg +Places365_test_00015874.jpg +Places365_test_00015877.jpg +Places365_test_00015878.jpg +Places365_test_00015883.jpg +Places365_test_00015895.jpg +Places365_test_00015909.jpg +Places365_test_00015916.jpg +Places365_test_00015918.jpg +Places365_test_00015954.jpg +Places365_test_00016000.jpg +Places365_test_00016009.jpg +Places365_test_00016013.jpg +Places365_test_00016036.jpg +Places365_test_00016039.jpg +Places365_test_00016040.jpg +Places365_test_00016053.jpg +Places365_test_00016059.jpg +Places365_test_00016074.jpg 
+Places365_test_00016077.jpg +Places365_test_00016085.jpg +Places365_test_00016086.jpg +Places365_test_00016091.jpg +Places365_test_00016096.jpg +Places365_test_00016097.jpg +Places365_test_00016130.jpg +Places365_test_00016147.jpg +Places365_test_00016152.jpg +Places365_test_00016168.jpg +Places365_test_00016176.jpg +Places365_test_00016200.jpg +Places365_test_00016232.jpg +Places365_test_00016237.jpg +Places365_test_00016255.jpg +Places365_test_00016267.jpg +Places365_test_00016271.jpg +Places365_test_00016280.jpg +Places365_test_00016300.jpg +Places365_test_00016307.jpg +Places365_test_00016326.jpg +Places365_test_00016342.jpg +Places365_test_00016343.jpg +Places365_test_00016352.jpg +Places365_test_00016356.jpg +Places365_test_00016386.jpg +Places365_test_00016387.jpg +Places365_test_00016393.jpg +Places365_test_00016394.jpg +Places365_test_00016401.jpg +Places365_test_00016407.jpg +Places365_test_00016411.jpg +Places365_test_00016423.jpg +Places365_test_00016431.jpg +Places365_test_00016435.jpg +Places365_test_00016478.jpg +Places365_test_00016520.jpg +Places365_test_00016541.jpg +Places365_test_00016550.jpg +Places365_test_00016558.jpg +Places365_test_00016595.jpg +Places365_test_00016627.jpg +Places365_test_00016639.jpg +Places365_test_00016665.jpg +Places365_test_00016670.jpg +Places365_test_00016671.jpg +Places365_test_00016698.jpg +Places365_test_00016702.jpg +Places365_test_00016705.jpg +Places365_test_00016707.jpg +Places365_test_00016714.jpg +Places365_test_00016725.jpg +Places365_test_00016734.jpg +Places365_test_00016748.jpg +Places365_test_00016766.jpg +Places365_test_00016778.jpg +Places365_test_00016787.jpg +Places365_test_00016812.jpg +Places365_test_00016820.jpg +Places365_test_00016838.jpg +Places365_test_00016843.jpg +Places365_test_00016857.jpg +Places365_test_00016864.jpg +Places365_test_00016866.jpg +Places365_test_00016880.jpg +Places365_test_00016883.jpg +Places365_test_00016905.jpg +Places365_test_00016906.jpg +Places365_test_00016913.jpg +Places365_test_00016915.jpg +Places365_test_00016933.jpg +Places365_test_00016954.jpg +Places365_test_00016955.jpg +Places365_test_00016957.jpg +Places365_test_00016963.jpg +Places365_test_00016969.jpg +Places365_test_00016987.jpg +Places365_test_00016991.jpg +Places365_test_00016993.jpg +Places365_test_00017011.jpg +Places365_test_00017072.jpg +Places365_test_00017096.jpg +Places365_test_00017111.jpg +Places365_test_00017122.jpg +Places365_test_00017134.jpg +Places365_test_00017153.jpg +Places365_test_00017179.jpg +Places365_test_00017190.jpg +Places365_test_00017220.jpg +Places365_test_00017226.jpg +Places365_test_00017235.jpg +Places365_test_00017239.jpg +Places365_test_00017254.jpg +Places365_test_00017256.jpg +Places365_test_00017265.jpg +Places365_test_00017288.jpg +Places365_test_00017293.jpg +Places365_test_00017295.jpg +Places365_test_00017301.jpg +Places365_test_00017313.jpg +Places365_test_00017314.jpg +Places365_test_00017334.jpg +Places365_test_00017336.jpg +Places365_test_00017343.jpg +Places365_test_00017368.jpg +Places365_test_00017385.jpg +Places365_test_00017406.jpg +Places365_test_00017411.jpg +Places365_test_00017415.jpg +Places365_test_00017429.jpg +Places365_test_00017438.jpg +Places365_test_00017444.jpg +Places365_test_00017458.jpg +Places365_test_00017460.jpg +Places365_test_00017470.jpg +Places365_test_00017474.jpg +Places365_test_00017510.jpg +Places365_test_00017537.jpg +Places365_test_00017584.jpg +Places365_test_00017590.jpg +Places365_test_00017602.jpg +Places365_test_00017610.jpg 
+Places365_test_00017631.jpg +Places365_test_00017645.jpg +Places365_test_00017646.jpg +Places365_test_00017658.jpg +Places365_test_00017679.jpg +Places365_test_00017692.jpg +Places365_test_00017709.jpg +Places365_test_00017711.jpg +Places365_test_00017724.jpg +Places365_test_00017732.jpg +Places365_test_00017750.jpg +Places365_test_00017763.jpg +Places365_test_00017764.jpg +Places365_test_00017775.jpg +Places365_test_00017803.jpg +Places365_test_00017821.jpg +Places365_test_00017830.jpg +Places365_test_00017835.jpg +Places365_test_00017838.jpg +Places365_test_00017852.jpg +Places365_test_00017855.jpg +Places365_test_00017858.jpg +Places365_test_00017860.jpg +Places365_test_00017863.jpg +Places365_test_00017866.jpg +Places365_test_00017874.jpg +Places365_test_00017910.jpg +Places365_test_00017913.jpg +Places365_test_00017916.jpg +Places365_test_00017975.jpg +Places365_test_00017986.jpg +Places365_test_00018001.jpg +Places365_test_00018009.jpg +Places365_test_00018012.jpg +Places365_test_00018015.jpg +Places365_test_00018024.jpg +Places365_test_00018033.jpg +Places365_test_00018042.jpg +Places365_test_00018045.jpg +Places365_test_00018061.jpg +Places365_test_00018062.jpg +Places365_test_00018072.jpg +Places365_test_00018080.jpg +Places365_test_00018083.jpg +Places365_test_00018094.jpg +Places365_test_00018099.jpg +Places365_test_00018104.jpg +Places365_test_00018112.jpg +Places365_test_00018120.jpg +Places365_test_00018135.jpg +Places365_test_00018140.jpg +Places365_test_00018153.jpg +Places365_test_00018164.jpg +Places365_test_00018184.jpg +Places365_test_00018208.jpg +Places365_test_00018209.jpg +Places365_test_00018214.jpg +Places365_test_00018218.jpg +Places365_test_00018238.jpg +Places365_test_00018241.jpg +Places365_test_00018249.jpg +Places365_test_00018262.jpg +Places365_test_00018265.jpg +Places365_test_00018268.jpg +Places365_test_00018280.jpg +Places365_test_00018286.jpg +Places365_test_00018293.jpg +Places365_test_00018324.jpg +Places365_test_00018326.jpg +Places365_test_00018330.jpg +Places365_test_00018339.jpg +Places365_test_00018340.jpg +Places365_test_00018343.jpg +Places365_test_00018345.jpg +Places365_test_00018356.jpg +Places365_test_00018426.jpg +Places365_test_00018432.jpg +Places365_test_00018441.jpg +Places365_test_00018443.jpg +Places365_test_00018444.jpg +Places365_test_00018446.jpg +Places365_test_00018452.jpg +Places365_test_00018459.jpg +Places365_test_00018465.jpg +Places365_test_00018495.jpg +Places365_test_00018501.jpg +Places365_test_00018504.jpg +Places365_test_00018520.jpg +Places365_test_00018537.jpg +Places365_test_00018541.jpg +Places365_test_00018555.jpg +Places365_test_00018563.jpg +Places365_test_00018566.jpg +Places365_test_00018569.jpg +Places365_test_00018576.jpg +Places365_test_00018577.jpg +Places365_test_00018596.jpg +Places365_test_00018609.jpg +Places365_test_00018622.jpg +Places365_test_00018626.jpg +Places365_test_00018629.jpg +Places365_test_00018650.jpg +Places365_test_00018671.jpg +Places365_test_00018690.jpg +Places365_test_00018703.jpg +Places365_test_00018707.jpg +Places365_test_00018714.jpg +Places365_test_00018716.jpg +Places365_test_00018718.jpg +Places365_test_00018719.jpg +Places365_test_00018733.jpg +Places365_test_00018747.jpg +Places365_test_00018756.jpg +Places365_test_00018771.jpg +Places365_test_00018775.jpg +Places365_test_00018809.jpg +Places365_test_00018853.jpg +Places365_test_00018887.jpg +Places365_test_00018890.jpg +Places365_test_00018916.jpg +Places365_test_00018926.jpg +Places365_test_00018944.jpg 
+Places365_test_00018948.jpg +Places365_test_00018983.jpg +Places365_test_00018984.jpg +Places365_test_00018992.jpg +Places365_test_00018997.jpg +Places365_test_00018999.jpg +Places365_test_00019018.jpg +Places365_test_00019039.jpg +Places365_test_00019064.jpg +Places365_test_00019069.jpg +Places365_test_00019073.jpg +Places365_test_00019098.jpg +Places365_test_00019124.jpg +Places365_test_00019132.jpg +Places365_test_00019137.jpg +Places365_test_00019152.jpg +Places365_test_00019162.jpg +Places365_test_00019163.jpg +Places365_test_00019165.jpg +Places365_test_00019168.jpg +Places365_test_00019173.jpg +Places365_test_00019181.jpg +Places365_test_00019183.jpg +Places365_test_00019197.jpg +Places365_test_00019223.jpg +Places365_test_00019227.jpg +Places365_test_00019248.jpg +Places365_test_00019250.jpg +Places365_test_00019258.jpg +Places365_test_00019265.jpg +Places365_test_00019270.jpg +Places365_test_00019272.jpg +Places365_test_00019278.jpg +Places365_test_00019282.jpg +Places365_test_00019301.jpg +Places365_test_00019302.jpg +Places365_test_00019313.jpg +Places365_test_00019318.jpg +Places365_test_00019333.jpg +Places365_test_00019351.jpg +Places365_test_00019354.jpg +Places365_test_00019358.jpg +Places365_test_00019380.jpg +Places365_test_00019405.jpg +Places365_test_00019435.jpg +Places365_test_00019439.jpg +Places365_test_00019451.jpg +Places365_test_00019475.jpg +Places365_test_00019493.jpg +Places365_test_00019505.jpg +Places365_test_00019514.jpg +Places365_test_00019521.jpg +Places365_test_00019527.jpg +Places365_test_00019539.jpg +Places365_test_00019542.jpg +Places365_test_00019555.jpg +Places365_test_00019562.jpg +Places365_test_00019568.jpg +Places365_test_00019592.jpg +Places365_test_00019594.jpg +Places365_test_00019600.jpg +Places365_test_00019678.jpg +Places365_test_00019686.jpg +Places365_test_00019709.jpg +Places365_test_00019730.jpg +Places365_test_00019743.jpg +Places365_test_00019756.jpg +Places365_test_00019780.jpg +Places365_test_00019784.jpg +Places365_test_00019787.jpg +Places365_test_00019790.jpg +Places365_test_00019800.jpg +Places365_test_00019807.jpg +Places365_test_00019809.jpg +Places365_test_00019811.jpg +Places365_test_00019818.jpg +Places365_test_00019819.jpg +Places365_test_00019821.jpg +Places365_test_00019827.jpg +Places365_test_00019833.jpg +Places365_test_00019837.jpg +Places365_test_00019838.jpg +Places365_test_00019867.jpg +Places365_test_00019870.jpg +Places365_test_00019899.jpg +Places365_test_00019902.jpg +Places365_test_00019904.jpg +Places365_test_00019933.jpg +Places365_test_00019946.jpg +Places365_test_00019955.jpg +Places365_test_00019958.jpg +Places365_test_00019960.jpg +Places365_test_00019992.jpg +Places365_test_00019996.jpg +Places365_test_00020017.jpg +Places365_test_00020039.jpg +Places365_test_00020048.jpg +Places365_test_00020062.jpg +Places365_test_00020081.jpg +Places365_test_00020084.jpg +Places365_test_00020088.jpg +Places365_test_00020100.jpg +Places365_test_00020112.jpg +Places365_test_00020116.jpg +Places365_test_00020123.jpg +Places365_test_00020131.jpg +Places365_test_00020137.jpg +Places365_test_00020152.jpg +Places365_test_00020154.jpg +Places365_test_00020158.jpg +Places365_test_00020163.jpg +Places365_test_00020179.jpg +Places365_test_00020183.jpg +Places365_test_00020200.jpg +Places365_test_00020201.jpg +Places365_test_00020208.jpg +Places365_test_00020212.jpg +Places365_test_00020218.jpg +Places365_test_00020233.jpg +Places365_test_00020248.jpg +Places365_test_00020299.jpg +Places365_test_00020324.jpg 
+Places365_test_00020330.jpg +Places365_test_00020338.jpg +Places365_test_00020345.jpg +Places365_test_00020351.jpg +Places365_test_00020366.jpg +Places365_test_00020367.jpg +Places365_test_00020368.jpg +Places365_test_00020370.jpg +Places365_test_00020391.jpg +Places365_test_00020414.jpg +Places365_test_00020418.jpg +Places365_test_00020425.jpg +Places365_test_00020490.jpg +Places365_test_00020492.jpg +Places365_test_00020499.jpg +Places365_test_00020502.jpg +Places365_test_00020512.jpg +Places365_test_00020517.jpg +Places365_test_00020522.jpg +Places365_test_00020525.jpg +Places365_test_00020528.jpg +Places365_test_00020537.jpg +Places365_test_00020543.jpg +Places365_test_00020546.jpg +Places365_test_00020548.jpg +Places365_test_00020553.jpg +Places365_test_00020558.jpg +Places365_test_00020563.jpg +Places365_test_00020565.jpg +Places365_test_00020567.jpg +Places365_test_00020572.jpg +Places365_test_00020587.jpg +Places365_test_00020596.jpg +Places365_test_00020618.jpg +Places365_test_00020637.jpg +Places365_test_00020640.jpg +Places365_test_00020644.jpg +Places365_test_00020645.jpg +Places365_test_00020656.jpg +Places365_test_00020667.jpg +Places365_test_00020670.jpg +Places365_test_00020684.jpg +Places365_test_00020688.jpg +Places365_test_00020696.jpg +Places365_test_00020697.jpg +Places365_test_00020702.jpg +Places365_test_00020726.jpg +Places365_test_00020733.jpg +Places365_test_00020744.jpg +Places365_test_00020758.jpg +Places365_test_00020813.jpg +Places365_test_00020814.jpg +Places365_test_00020826.jpg +Places365_test_00020832.jpg +Places365_test_00020843.jpg +Places365_test_00020862.jpg +Places365_test_00020863.jpg +Places365_test_00020869.jpg +Places365_test_00020895.jpg +Places365_test_00020912.jpg +Places365_test_00020913.jpg +Places365_test_00020942.jpg +Places365_test_00020977.jpg +Places365_test_00020980.jpg +Places365_test_00020990.jpg +Places365_test_00021010.jpg +Places365_test_00021035.jpg +Places365_test_00021046.jpg +Places365_test_00021049.jpg +Places365_test_00021053.jpg +Places365_test_00021078.jpg +Places365_test_00021086.jpg +Places365_test_00021104.jpg +Places365_test_00021110.jpg +Places365_test_00021127.jpg +Places365_test_00021155.jpg +Places365_test_00021187.jpg +Places365_test_00021207.jpg +Places365_test_00021209.jpg +Places365_test_00021211.jpg +Places365_test_00021213.jpg +Places365_test_00021217.jpg +Places365_test_00021228.jpg +Places365_test_00021229.jpg +Places365_test_00021240.jpg +Places365_test_00021276.jpg +Places365_test_00021319.jpg +Places365_test_00021322.jpg +Places365_test_00021329.jpg +Places365_test_00021335.jpg +Places365_test_00021349.jpg +Places365_test_00021358.jpg +Places365_test_00021360.jpg +Places365_test_00021362.jpg +Places365_test_00021366.jpg +Places365_test_00021375.jpg +Places365_test_00021398.jpg +Places365_test_00021400.jpg +Places365_test_00021405.jpg +Places365_test_00021418.jpg +Places365_test_00021446.jpg +Places365_test_00021450.jpg +Places365_test_00021469.jpg +Places365_test_00021486.jpg +Places365_test_00021488.jpg +Places365_test_00021490.jpg +Places365_test_00021503.jpg +Places365_test_00021508.jpg +Places365_test_00021529.jpg +Places365_test_00021530.jpg +Places365_test_00021567.jpg +Places365_test_00021573.jpg +Places365_test_00021587.jpg +Places365_test_00021632.jpg +Places365_test_00021640.jpg +Places365_test_00021651.jpg +Places365_test_00021655.jpg +Places365_test_00021675.jpg +Places365_test_00021678.jpg +Places365_test_00021697.jpg +Places365_test_00021713.jpg +Places365_test_00021724.jpg 
+Places365_test_00021732.jpg +Places365_test_00021749.jpg +Places365_test_00021755.jpg +Places365_test_00021776.jpg +Places365_test_00021782.jpg +Places365_test_00021818.jpg +Places365_test_00021826.jpg +Places365_test_00021843.jpg +Places365_test_00021858.jpg +Places365_test_00021871.jpg +Places365_test_00021872.jpg +Places365_test_00021888.jpg +Places365_test_00021898.jpg +Places365_test_00021906.jpg +Places365_test_00021908.jpg +Places365_test_00021929.jpg +Places365_test_00021939.jpg +Places365_test_00021943.jpg +Places365_test_00021953.jpg +Places365_test_00021963.jpg +Places365_test_00021979.jpg +Places365_test_00021980.jpg +Places365_test_00021984.jpg +Places365_test_00022006.jpg +Places365_test_00022034.jpg +Places365_test_00022038.jpg +Places365_test_00022047.jpg +Places365_test_00022048.jpg +Places365_test_00022071.jpg +Places365_test_00022074.jpg +Places365_test_00022086.jpg +Places365_test_00022098.jpg +Places365_test_00022101.jpg +Places365_test_00022114.jpg +Places365_test_00022121.jpg +Places365_test_00022140.jpg +Places365_test_00022142.jpg +Places365_test_00022145.jpg +Places365_test_00022155.jpg +Places365_test_00022169.jpg +Places365_test_00022172.jpg +Places365_test_00022183.jpg +Places365_test_00022191.jpg +Places365_test_00022192.jpg +Places365_test_00022196.jpg +Places365_test_00022203.jpg +Places365_test_00022207.jpg +Places365_test_00022215.jpg +Places365_test_00022218.jpg +Places365_test_00022225.jpg +Places365_test_00022236.jpg +Places365_test_00022257.jpg +Places365_test_00022262.jpg +Places365_test_00022263.jpg +Places365_test_00022269.jpg +Places365_test_00022272.jpg +Places365_test_00022275.jpg +Places365_test_00022276.jpg +Places365_test_00022284.jpg +Places365_test_00022289.jpg +Places365_test_00022290.jpg +Places365_test_00022300.jpg +Places365_test_00022301.jpg +Places365_test_00022312.jpg +Places365_test_00022333.jpg +Places365_test_00022349.jpg +Places365_test_00022357.jpg +Places365_test_00022359.jpg +Places365_test_00022380.jpg +Places365_test_00022391.jpg +Places365_test_00022396.jpg +Places365_test_00022408.jpg +Places365_test_00022416.jpg +Places365_test_00022421.jpg +Places365_test_00022453.jpg +Places365_test_00022474.jpg +Places365_test_00022502.jpg +Places365_test_00022517.jpg +Places365_test_00022526.jpg +Places365_test_00022532.jpg +Places365_test_00022544.jpg +Places365_test_00022566.jpg +Places365_test_00022581.jpg +Places365_test_00022588.jpg +Places365_test_00022616.jpg +Places365_test_00022633.jpg +Places365_test_00022634.jpg +Places365_test_00022637.jpg +Places365_test_00022644.jpg +Places365_test_00022670.jpg +Places365_test_00022675.jpg +Places365_test_00022684.jpg +Places365_test_00022685.jpg +Places365_test_00022699.jpg +Places365_test_00022721.jpg +Places365_test_00022728.jpg +Places365_test_00022748.jpg +Places365_test_00022760.jpg +Places365_test_00022764.jpg +Places365_test_00022766.jpg +Places365_test_00022779.jpg +Places365_test_00022793.jpg +Places365_test_00022800.jpg +Places365_test_00022804.jpg +Places365_test_00022812.jpg +Places365_test_00022827.jpg +Places365_test_00022853.jpg +Places365_test_00022855.jpg +Places365_test_00022857.jpg +Places365_test_00022866.jpg +Places365_test_00022873.jpg +Places365_test_00022877.jpg +Places365_test_00022884.jpg +Places365_test_00022904.jpg +Places365_test_00022905.jpg +Places365_test_00022915.jpg +Places365_test_00022943.jpg +Places365_test_00022947.jpg +Places365_test_00022958.jpg +Places365_test_00023016.jpg +Places365_test_00023040.jpg +Places365_test_00023086.jpg 
+Places365_test_00023092.jpg +Places365_test_00023097.jpg +Places365_test_00023110.jpg +Places365_test_00023126.jpg +Places365_test_00023129.jpg +Places365_test_00023134.jpg +Places365_test_00023140.jpg +Places365_test_00023147.jpg +Places365_test_00023149.jpg +Places365_test_00023201.jpg +Places365_test_00023206.jpg +Places365_test_00023213.jpg +Places365_test_00023215.jpg +Places365_test_00023224.jpg +Places365_test_00023256.jpg +Places365_test_00023257.jpg +Places365_test_00023262.jpg +Places365_test_00023286.jpg +Places365_test_00023308.jpg +Places365_test_00023314.jpg +Places365_test_00023316.jpg +Places365_test_00023325.jpg +Places365_test_00023333.jpg +Places365_test_00023355.jpg +Places365_test_00023383.jpg +Places365_test_00023389.jpg +Places365_test_00023402.jpg +Places365_test_00023414.jpg +Places365_test_00023442.jpg +Places365_test_00023456.jpg +Places365_test_00023457.jpg +Places365_test_00023461.jpg +Places365_test_00023489.jpg +Places365_test_00023491.jpg +Places365_test_00023496.jpg +Places365_test_00023507.jpg +Places365_test_00023515.jpg +Places365_test_00023520.jpg +Places365_test_00023522.jpg +Places365_test_00023524.jpg +Places365_test_00023533.jpg +Places365_test_00023538.jpg +Places365_test_00023555.jpg +Places365_test_00023564.jpg +Places365_test_00023576.jpg +Places365_test_00023584.jpg +Places365_test_00023588.jpg +Places365_test_00023602.jpg +Places365_test_00023605.jpg +Places365_test_00023617.jpg +Places365_test_00023633.jpg +Places365_test_00023650.jpg +Places365_test_00023659.jpg +Places365_test_00023662.jpg +Places365_test_00023663.jpg +Places365_test_00023679.jpg +Places365_test_00023694.jpg +Places365_test_00023715.jpg +Places365_test_00023717.jpg +Places365_test_00023718.jpg +Places365_test_00023723.jpg +Places365_test_00023725.jpg +Places365_test_00023728.jpg +Places365_test_00023732.jpg +Places365_test_00023737.jpg +Places365_test_00023757.jpg +Places365_test_00023760.jpg +Places365_test_00023762.jpg +Places365_test_00023765.jpg +Places365_test_00023770.jpg +Places365_test_00023772.jpg +Places365_test_00023792.jpg +Places365_test_00023824.jpg +Places365_test_00023833.jpg +Places365_test_00023839.jpg +Places365_test_00023855.jpg +Places365_test_00023858.jpg +Places365_test_00023896.jpg +Places365_test_00023916.jpg +Places365_test_00023925.jpg +Places365_test_00023930.jpg +Places365_test_00023940.jpg +Places365_test_00023950.jpg +Places365_test_00023960.jpg +Places365_test_00023974.jpg +Places365_test_00023984.jpg +Places365_test_00023986.jpg +Places365_test_00023987.jpg +Places365_test_00023989.jpg +Places365_test_00023994.jpg +Places365_test_00023995.jpg +Places365_test_00024006.jpg +Places365_test_00024017.jpg +Places365_test_00024032.jpg +Places365_test_00024034.jpg +Places365_test_00024057.jpg +Places365_test_00024089.jpg +Places365_test_00024091.jpg +Places365_test_00024101.jpg +Places365_test_00024106.jpg +Places365_test_00024121.jpg +Places365_test_00024152.jpg +Places365_test_00024159.jpg +Places365_test_00024173.jpg +Places365_test_00024189.jpg +Places365_test_00024197.jpg +Places365_test_00024198.jpg +Places365_test_00024225.jpg +Places365_test_00024233.jpg +Places365_test_00024236.jpg +Places365_test_00024237.jpg +Places365_test_00024238.jpg +Places365_test_00024255.jpg +Places365_test_00024260.jpg +Places365_test_00024262.jpg +Places365_test_00024282.jpg +Places365_test_00024291.jpg +Places365_test_00024299.jpg +Places365_test_00024302.jpg +Places365_test_00024307.jpg +Places365_test_00024317.jpg +Places365_test_00024335.jpg 
+Places365_test_00024336.jpg +Places365_test_00024338.jpg +Places365_test_00024343.jpg +Places365_test_00024351.jpg +Places365_test_00024368.jpg +Places365_test_00024386.jpg +Places365_test_00024398.jpg +Places365_test_00024399.jpg +Places365_test_00024403.jpg +Places365_test_00024408.jpg +Places365_test_00024421.jpg +Places365_test_00024425.jpg +Places365_test_00024438.jpg +Places365_test_00024445.jpg +Places365_test_00024448.jpg +Places365_test_00024449.jpg +Places365_test_00024474.jpg +Places365_test_00024491.jpg +Places365_test_00024508.jpg +Places365_test_00024512.jpg +Places365_test_00024531.jpg +Places365_test_00024543.jpg +Places365_test_00024547.jpg +Places365_test_00024560.jpg +Places365_test_00024570.jpg +Places365_test_00024585.jpg +Places365_test_00024593.jpg +Places365_test_00024600.jpg +Places365_test_00024601.jpg +Places365_test_00024613.jpg +Places365_test_00024616.jpg +Places365_test_00024630.jpg +Places365_test_00024654.jpg +Places365_test_00024667.jpg +Places365_test_00024675.jpg +Places365_test_00024677.jpg +Places365_test_00024709.jpg +Places365_test_00024714.jpg +Places365_test_00024718.jpg +Places365_test_00024721.jpg +Places365_test_00024726.jpg +Places365_test_00024731.jpg +Places365_test_00024734.jpg +Places365_test_00024741.jpg +Places365_test_00024758.jpg +Places365_test_00024778.jpg +Places365_test_00024779.jpg +Places365_test_00024789.jpg +Places365_test_00024804.jpg +Places365_test_00024813.jpg +Places365_test_00024824.jpg +Places365_test_00024853.jpg +Places365_test_00024858.jpg +Places365_test_00024887.jpg +Places365_test_00024893.jpg +Places365_test_00024897.jpg +Places365_test_00024905.jpg +Places365_test_00024919.jpg +Places365_test_00024922.jpg +Places365_test_00024923.jpg +Places365_test_00024930.jpg +Places365_test_00024939.jpg +Places365_test_00024940.jpg +Places365_test_00024959.jpg +Places365_test_00024960.jpg +Places365_test_00024961.jpg +Places365_test_00024967.jpg +Places365_test_00024980.jpg +Places365_test_00024997.jpg +Places365_test_00025001.jpg +Places365_test_00025028.jpg +Places365_test_00025041.jpg +Places365_test_00025042.jpg +Places365_test_00025069.jpg +Places365_test_00025081.jpg +Places365_test_00025083.jpg +Places365_test_00025084.jpg +Places365_test_00025091.jpg +Places365_test_00025106.jpg +Places365_test_00025110.jpg +Places365_test_00025119.jpg +Places365_test_00025130.jpg +Places365_test_00025137.jpg +Places365_test_00025153.jpg +Places365_test_00025164.jpg +Places365_test_00025167.jpg +Places365_test_00025168.jpg +Places365_test_00025185.jpg +Places365_test_00025197.jpg +Places365_test_00025206.jpg +Places365_test_00025227.jpg +Places365_test_00025243.jpg +Places365_test_00025250.jpg +Places365_test_00025268.jpg +Places365_test_00025275.jpg +Places365_test_00025289.jpg +Places365_test_00025325.jpg +Places365_test_00025343.jpg +Places365_test_00025349.jpg +Places365_test_00025350.jpg +Places365_test_00025357.jpg +Places365_test_00025380.jpg +Places365_test_00025436.jpg +Places365_test_00025437.jpg +Places365_test_00025439.jpg +Places365_test_00025444.jpg +Places365_test_00025451.jpg +Places365_test_00025467.jpg +Places365_test_00025470.jpg +Places365_test_00025473.jpg +Places365_test_00025491.jpg +Places365_test_00025513.jpg +Places365_test_00025539.jpg +Places365_test_00025550.jpg +Places365_test_00025553.jpg +Places365_test_00025560.jpg +Places365_test_00025572.jpg +Places365_test_00025575.jpg +Places365_test_00025577.jpg +Places365_test_00025578.jpg +Places365_test_00025580.jpg +Places365_test_00025631.jpg 
+Places365_test_00025643.jpg +Places365_test_00025647.jpg +Places365_test_00025652.jpg +Places365_test_00025655.jpg +Places365_test_00025660.jpg +Places365_test_00025666.jpg +Places365_test_00025668.jpg +Places365_test_00025682.jpg +Places365_test_00025686.jpg +Places365_test_00025690.jpg +Places365_test_00025736.jpg +Places365_test_00025760.jpg +Places365_test_00025761.jpg +Places365_test_00025768.jpg +Places365_test_00025769.jpg +Places365_test_00025771.jpg +Places365_test_00025773.jpg +Places365_test_00025774.jpg +Places365_test_00025780.jpg +Places365_test_00025781.jpg +Places365_test_00025782.jpg +Places365_test_00025788.jpg +Places365_test_00025801.jpg +Places365_test_00025826.jpg +Places365_test_00025840.jpg +Places365_test_00025841.jpg +Places365_test_00025850.jpg +Places365_test_00025859.jpg +Places365_test_00025861.jpg +Places365_test_00025863.jpg +Places365_test_00025881.jpg +Places365_test_00025894.jpg +Places365_test_00025895.jpg +Places365_test_00025898.jpg +Places365_test_00025931.jpg +Places365_test_00025937.jpg +Places365_test_00025952.jpg +Places365_test_00025966.jpg +Places365_test_00025970.jpg +Places365_test_00025985.jpg +Places365_test_00025991.jpg +Places365_test_00025992.jpg +Places365_test_00025997.jpg +Places365_test_00026001.jpg +Places365_test_00026002.jpg +Places365_test_00026004.jpg +Places365_test_00026026.jpg +Places365_test_00026052.jpg +Places365_test_00026053.jpg +Places365_test_00026057.jpg +Places365_test_00026066.jpg +Places365_test_00026109.jpg +Places365_test_00026110.jpg +Places365_test_00026126.jpg +Places365_test_00026141.jpg +Places365_test_00026152.jpg +Places365_test_00026154.jpg +Places365_test_00026157.jpg +Places365_test_00026159.jpg +Places365_test_00026164.jpg +Places365_test_00026165.jpg +Places365_test_00026170.jpg +Places365_test_00026174.jpg +Places365_test_00026200.jpg +Places365_test_00026208.jpg +Places365_test_00026220.jpg +Places365_test_00026235.jpg +Places365_test_00026236.jpg +Places365_test_00026248.jpg +Places365_test_00026302.jpg +Places365_test_00026303.jpg +Places365_test_00026314.jpg +Places365_test_00026316.jpg +Places365_test_00026317.jpg +Places365_test_00026339.jpg +Places365_test_00026340.jpg +Places365_test_00026359.jpg +Places365_test_00026375.jpg +Places365_test_00026445.jpg +Places365_test_00026480.jpg +Places365_test_00026487.jpg +Places365_test_00026496.jpg +Places365_test_00026505.jpg +Places365_test_00026523.jpg +Places365_test_00026525.jpg +Places365_test_00026554.jpg +Places365_test_00026562.jpg +Places365_test_00026567.jpg +Places365_test_00026569.jpg +Places365_test_00026570.jpg +Places365_test_00026576.jpg +Places365_test_00026617.jpg +Places365_test_00026620.jpg +Places365_test_00026646.jpg +Places365_test_00026647.jpg +Places365_test_00026671.jpg +Places365_test_00026689.jpg +Places365_test_00026711.jpg +Places365_test_00026713.jpg +Places365_test_00026729.jpg +Places365_test_00026735.jpg +Places365_test_00026739.jpg +Places365_test_00026753.jpg +Places365_test_00026756.jpg +Places365_test_00026788.jpg +Places365_test_00026796.jpg +Places365_test_00026797.jpg +Places365_test_00026798.jpg +Places365_test_00026800.jpg +Places365_test_00026801.jpg +Places365_test_00026803.jpg +Places365_test_00026807.jpg +Places365_test_00026837.jpg +Places365_test_00026851.jpg +Places365_test_00026855.jpg +Places365_test_00026877.jpg +Places365_test_00026878.jpg +Places365_test_00026888.jpg +Places365_test_00026905.jpg +Places365_test_00026907.jpg +Places365_test_00026908.jpg +Places365_test_00026917.jpg 
+Places365_test_00026926.jpg +Places365_test_00026927.jpg +Places365_test_00026939.jpg +Places365_test_00026951.jpg +Places365_test_00026953.jpg +Places365_test_00026955.jpg +Places365_test_00026958.jpg +Places365_test_00026966.jpg +Places365_test_00026971.jpg +Places365_test_00026979.jpg +Places365_test_00026980.jpg +Places365_test_00026990.jpg +Places365_test_00026992.jpg +Places365_test_00026995.jpg +Places365_test_00027000.jpg +Places365_test_00027002.jpg +Places365_test_00027003.jpg +Places365_test_00027009.jpg +Places365_test_00027021.jpg +Places365_test_00027026.jpg +Places365_test_00027043.jpg +Places365_test_00027048.jpg +Places365_test_00027050.jpg +Places365_test_00027057.jpg +Places365_test_00027060.jpg +Places365_test_00027062.jpg +Places365_test_00027077.jpg +Places365_test_00027116.jpg +Places365_test_00027118.jpg +Places365_test_00027122.jpg +Places365_test_00027126.jpg +Places365_test_00027133.jpg +Places365_test_00027147.jpg +Places365_test_00027173.jpg +Places365_test_00027201.jpg +Places365_test_00027203.jpg +Places365_test_00027205.jpg +Places365_test_00027217.jpg +Places365_test_00027223.jpg +Places365_test_00027226.jpg +Places365_test_00027248.jpg +Places365_test_00027249.jpg +Places365_test_00027254.jpg +Places365_test_00027289.jpg +Places365_test_00027294.jpg +Places365_test_00027300.jpg +Places365_test_00027309.jpg +Places365_test_00027328.jpg +Places365_test_00027338.jpg +Places365_test_00027340.jpg +Places365_test_00027344.jpg +Places365_test_00027349.jpg +Places365_test_00027355.jpg +Places365_test_00027376.jpg +Places365_test_00027417.jpg +Places365_test_00027421.jpg +Places365_test_00027429.jpg +Places365_test_00027432.jpg +Places365_test_00027458.jpg +Places365_test_00027471.jpg +Places365_test_00027475.jpg +Places365_test_00027487.jpg +Places365_test_00027489.jpg +Places365_test_00027497.jpg +Places365_test_00027506.jpg +Places365_test_00027530.jpg +Places365_test_00027540.jpg +Places365_test_00027542.jpg +Places365_test_00027552.jpg +Places365_test_00027579.jpg +Places365_test_00027590.jpg +Places365_test_00027610.jpg +Places365_test_00027635.jpg +Places365_test_00027639.jpg +Places365_test_00027643.jpg +Places365_test_00027647.jpg +Places365_test_00027649.jpg +Places365_test_00027651.jpg +Places365_test_00027666.jpg +Places365_test_00027667.jpg +Places365_test_00027687.jpg +Places365_test_00027692.jpg +Places365_test_00027693.jpg +Places365_test_00027694.jpg +Places365_test_00027703.jpg +Places365_test_00027729.jpg +Places365_test_00027752.jpg +Places365_test_00027754.jpg +Places365_test_00027756.jpg +Places365_test_00027764.jpg +Places365_test_00027766.jpg +Places365_test_00027771.jpg +Places365_test_00027803.jpg +Places365_test_00027811.jpg +Places365_test_00027812.jpg +Places365_test_00027852.jpg +Places365_test_00027869.jpg +Places365_test_00027883.jpg +Places365_test_00027887.jpg +Places365_test_00027892.jpg +Places365_test_00027933.jpg +Places365_test_00027941.jpg +Places365_test_00027942.jpg +Places365_test_00027951.jpg +Places365_test_00027958.jpg +Places365_test_00027963.jpg +Places365_test_00028013.jpg +Places365_test_00028021.jpg +Places365_test_00028024.jpg +Places365_test_00028045.jpg +Places365_test_00028049.jpg +Places365_test_00028097.jpg +Places365_test_00028102.jpg +Places365_test_00028110.jpg +Places365_test_00028115.jpg +Places365_test_00028122.jpg +Places365_test_00028123.jpg +Places365_test_00028127.jpg +Places365_test_00028130.jpg +Places365_test_00028138.jpg +Places365_test_00028155.jpg +Places365_test_00028161.jpg 
+Places365_test_00028168.jpg +Places365_test_00028173.jpg +Places365_test_00028174.jpg +Places365_test_00028206.jpg +Places365_test_00028226.jpg +Places365_test_00028228.jpg +Places365_test_00028252.jpg +Places365_test_00028258.jpg +Places365_test_00028262.jpg +Places365_test_00028264.jpg +Places365_test_00028277.jpg +Places365_test_00028313.jpg +Places365_test_00028318.jpg +Places365_test_00028326.jpg +Places365_test_00028331.jpg +Places365_test_00028337.jpg +Places365_test_00028366.jpg +Places365_test_00028370.jpg +Places365_test_00028375.jpg +Places365_test_00028391.jpg +Places365_test_00028428.jpg +Places365_test_00028432.jpg +Places365_test_00028449.jpg +Places365_test_00028451.jpg +Places365_test_00028462.jpg +Places365_test_00028463.jpg +Places365_test_00028464.jpg +Places365_test_00028476.jpg +Places365_test_00028478.jpg +Places365_test_00028490.jpg +Places365_test_00028491.jpg +Places365_test_00028494.jpg +Places365_test_00028496.jpg +Places365_test_00028501.jpg +Places365_test_00028502.jpg +Places365_test_00028521.jpg +Places365_test_00028535.jpg +Places365_test_00028539.jpg +Places365_test_00028541.jpg +Places365_test_00028550.jpg +Places365_test_00028552.jpg +Places365_test_00028553.jpg +Places365_test_00028559.jpg +Places365_test_00028569.jpg +Places365_test_00028576.jpg +Places365_test_00028583.jpg +Places365_test_00028595.jpg +Places365_test_00028600.jpg +Places365_test_00028609.jpg +Places365_test_00028623.jpg +Places365_test_00028633.jpg +Places365_test_00028642.jpg +Places365_test_00028644.jpg +Places365_test_00028657.jpg +Places365_test_00028680.jpg +Places365_test_00028712.jpg +Places365_test_00028714.jpg +Places365_test_00028728.jpg +Places365_test_00028750.jpg +Places365_test_00028753.jpg +Places365_test_00028756.jpg +Places365_test_00028761.jpg +Places365_test_00028762.jpg +Places365_test_00028773.jpg +Places365_test_00028802.jpg +Places365_test_00028853.jpg +Places365_test_00028855.jpg +Places365_test_00028857.jpg +Places365_test_00028870.jpg +Places365_test_00028877.jpg +Places365_test_00028884.jpg +Places365_test_00028887.jpg +Places365_test_00028888.jpg +Places365_test_00028900.jpg +Places365_test_00028903.jpg +Places365_test_00028908.jpg +Places365_test_00028920.jpg +Places365_test_00028943.jpg +Places365_test_00028950.jpg +Places365_test_00028954.jpg +Places365_test_00028979.jpg +Places365_test_00028999.jpg +Places365_test_00029003.jpg +Places365_test_00029005.jpg +Places365_test_00029007.jpg +Places365_test_00029015.jpg +Places365_test_00029038.jpg +Places365_test_00029048.jpg +Places365_test_00029081.jpg +Places365_test_00029096.jpg +Places365_test_00029105.jpg +Places365_test_00029133.jpg +Places365_test_00029135.jpg +Places365_test_00029150.jpg +Places365_test_00029152.jpg +Places365_test_00029154.jpg +Places365_test_00029182.jpg +Places365_test_00029199.jpg +Places365_test_00029202.jpg +Places365_test_00029204.jpg +Places365_test_00029209.jpg +Places365_test_00029222.jpg +Places365_test_00029247.jpg +Places365_test_00029267.jpg +Places365_test_00029296.jpg +Places365_test_00029302.jpg +Places365_test_00029309.jpg +Places365_test_00029316.jpg +Places365_test_00029322.jpg +Places365_test_00029323.jpg +Places365_test_00029331.jpg +Places365_test_00029333.jpg +Places365_test_00029350.jpg +Places365_test_00029363.jpg +Places365_test_00029370.jpg +Places365_test_00029384.jpg +Places365_test_00029389.jpg +Places365_test_00029400.jpg +Places365_test_00029408.jpg +Places365_test_00029439.jpg +Places365_test_00029455.jpg +Places365_test_00029460.jpg 
+Places365_test_00029463.jpg +Places365_test_00029487.jpg +Places365_test_00029503.jpg +Places365_test_00029507.jpg +Places365_test_00029519.jpg +Places365_test_00029524.jpg +Places365_test_00029526.jpg +Places365_test_00029528.jpg +Places365_test_00029552.jpg +Places365_test_00029565.jpg +Places365_test_00029610.jpg +Places365_test_00029631.jpg +Places365_test_00029632.jpg +Places365_test_00029634.jpg +Places365_test_00029644.jpg +Places365_test_00029656.jpg +Places365_test_00029665.jpg +Places365_test_00029670.jpg +Places365_test_00029672.jpg +Places365_test_00029695.jpg +Places365_test_00029702.jpg +Places365_test_00029707.jpg +Places365_test_00029711.jpg +Places365_test_00029716.jpg +Places365_test_00029746.jpg +Places365_test_00029753.jpg +Places365_test_00029761.jpg +Places365_test_00029772.jpg +Places365_test_00029776.jpg +Places365_test_00029790.jpg +Places365_test_00029818.jpg +Places365_test_00029819.jpg +Places365_test_00029827.jpg +Places365_test_00029832.jpg +Places365_test_00029838.jpg +Places365_test_00029873.jpg +Places365_test_00029879.jpg +Places365_test_00029922.jpg +Places365_test_00029943.jpg +Places365_test_00029952.jpg +Places365_test_00029962.jpg +Places365_test_00029963.jpg +Places365_test_00029982.jpg +Places365_test_00029989.jpg +Places365_test_00029991.jpg +Places365_test_00030005.jpg +Places365_test_00030008.jpg +Places365_test_00030018.jpg +Places365_test_00030024.jpg +Places365_test_00030049.jpg +Places365_test_00030055.jpg +Places365_test_00030056.jpg +Places365_test_00030057.jpg +Places365_test_00030070.jpg +Places365_test_00030082.jpg +Places365_test_00030099.jpg +Places365_test_00030100.jpg +Places365_test_00030103.jpg +Places365_test_00030107.jpg +Places365_test_00030114.jpg +Places365_test_00030115.jpg +Places365_test_00030116.jpg +Places365_test_00030125.jpg +Places365_test_00030129.jpg +Places365_test_00030134.jpg +Places365_test_00030143.jpg +Places365_test_00030147.jpg +Places365_test_00030158.jpg +Places365_test_00030161.jpg +Places365_test_00030178.jpg +Places365_test_00030185.jpg +Places365_test_00030198.jpg +Places365_test_00030201.jpg +Places365_test_00030217.jpg +Places365_test_00030233.jpg +Places365_test_00030235.jpg +Places365_test_00030259.jpg +Places365_test_00030261.jpg +Places365_test_00030268.jpg +Places365_test_00030272.jpg +Places365_test_00030282.jpg +Places365_test_00030287.jpg +Places365_test_00030290.jpg +Places365_test_00030297.jpg +Places365_test_00030301.jpg +Places365_test_00030308.jpg +Places365_test_00030313.jpg +Places365_test_00030319.jpg +Places365_test_00030322.jpg +Places365_test_00030328.jpg +Places365_test_00030337.jpg +Places365_test_00030351.jpg +Places365_test_00030352.jpg +Places365_test_00030373.jpg +Places365_test_00030377.jpg +Places365_test_00030380.jpg +Places365_test_00030408.jpg +Places365_test_00030410.jpg +Places365_test_00030421.jpg +Places365_test_00030435.jpg +Places365_test_00030440.jpg +Places365_test_00030445.jpg +Places365_test_00030446.jpg +Places365_test_00030449.jpg +Places365_test_00030457.jpg +Places365_test_00030465.jpg +Places365_test_00030468.jpg +Places365_test_00030469.jpg +Places365_test_00030494.jpg +Places365_test_00030500.jpg +Places365_test_00030510.jpg +Places365_test_00030513.jpg +Places365_test_00030515.jpg +Places365_test_00030524.jpg +Places365_test_00030525.jpg +Places365_test_00030530.jpg +Places365_test_00030549.jpg +Places365_test_00030553.jpg +Places365_test_00030567.jpg +Places365_test_00030580.jpg +Places365_test_00030585.jpg +Places365_test_00030588.jpg 
+Places365_test_00030590.jpg +Places365_test_00030592.jpg +Places365_test_00030595.jpg +Places365_test_00030602.jpg +Places365_test_00030609.jpg +Places365_test_00030616.jpg +Places365_test_00030626.jpg +Places365_test_00030632.jpg +Places365_test_00030633.jpg +Places365_test_00030641.jpg +Places365_test_00030643.jpg +Places365_test_00030664.jpg +Places365_test_00030669.jpg +Places365_test_00030675.jpg +Places365_test_00030682.jpg +Places365_test_00030702.jpg +Places365_test_00030712.jpg +Places365_test_00030719.jpg +Places365_test_00030807.jpg +Places365_test_00030814.jpg +Places365_test_00030816.jpg +Places365_test_00030821.jpg +Places365_test_00030834.jpg +Places365_test_00030839.jpg +Places365_test_00030857.jpg +Places365_test_00030887.jpg +Places365_test_00030900.jpg +Places365_test_00030915.jpg +Places365_test_00030918.jpg +Places365_test_00030952.jpg +Places365_test_00030970.jpg +Places365_test_00030973.jpg +Places365_test_00030984.jpg +Places365_test_00030985.jpg +Places365_test_00030989.jpg +Places365_test_00030991.jpg +Places365_test_00031012.jpg +Places365_test_00031020.jpg +Places365_test_00031026.jpg +Places365_test_00031039.jpg +Places365_test_00031041.jpg +Places365_test_00031043.jpg +Places365_test_00031044.jpg +Places365_test_00031046.jpg +Places365_test_00031057.jpg +Places365_test_00031064.jpg +Places365_test_00031075.jpg +Places365_test_00031077.jpg +Places365_test_00031095.jpg +Places365_test_00031107.jpg +Places365_test_00031109.jpg +Places365_test_00031117.jpg +Places365_test_00031121.jpg +Places365_test_00031122.jpg +Places365_test_00031124.jpg +Places365_test_00031144.jpg +Places365_test_00031149.jpg +Places365_test_00031156.jpg +Places365_test_00031183.jpg +Places365_test_00031210.jpg +Places365_test_00031246.jpg +Places365_test_00031258.jpg +Places365_test_00031261.jpg +Places365_test_00031266.jpg +Places365_test_00031268.jpg +Places365_test_00031281.jpg +Places365_test_00031283.jpg +Places365_test_00031288.jpg +Places365_test_00031289.jpg +Places365_test_00031300.jpg +Places365_test_00031302.jpg +Places365_test_00031306.jpg +Places365_test_00031307.jpg +Places365_test_00031330.jpg +Places365_test_00031339.jpg +Places365_test_00031357.jpg +Places365_test_00031374.jpg +Places365_test_00031375.jpg +Places365_test_00031377.jpg +Places365_test_00031380.jpg +Places365_test_00031396.jpg +Places365_test_00031413.jpg +Places365_test_00031415.jpg +Places365_test_00031421.jpg +Places365_test_00031450.jpg +Places365_test_00031452.jpg +Places365_test_00031486.jpg +Places365_test_00031508.jpg +Places365_test_00031516.jpg +Places365_test_00031528.jpg +Places365_test_00031541.jpg +Places365_test_00031571.jpg +Places365_test_00031582.jpg +Places365_test_00031584.jpg +Places365_test_00031607.jpg +Places365_test_00031623.jpg +Places365_test_00031638.jpg +Places365_test_00031692.jpg +Places365_test_00031703.jpg +Places365_test_00031705.jpg +Places365_test_00031707.jpg +Places365_test_00031720.jpg +Places365_test_00031723.jpg +Places365_test_00031732.jpg +Places365_test_00031744.jpg +Places365_test_00031751.jpg +Places365_test_00031769.jpg +Places365_test_00031772.jpg +Places365_test_00031782.jpg +Places365_test_00031790.jpg +Places365_test_00031823.jpg +Places365_test_00031833.jpg +Places365_test_00031875.jpg +Places365_test_00031884.jpg +Places365_test_00031902.jpg +Places365_test_00031921.jpg +Places365_test_00031927.jpg +Places365_test_00031931.jpg +Places365_test_00031960.jpg +Places365_test_00031966.jpg +Places365_test_00031990.jpg +Places365_test_00032001.jpg 
+Places365_test_00032014.jpg +Places365_test_00032021.jpg +Places365_test_00032030.jpg +Places365_test_00032054.jpg +Places365_test_00032068.jpg +Places365_test_00032097.jpg +Places365_test_00032112.jpg +Places365_test_00032120.jpg +Places365_test_00032151.jpg +Places365_test_00032152.jpg +Places365_test_00032153.jpg +Places365_test_00032154.jpg +Places365_test_00032159.jpg +Places365_test_00032162.jpg +Places365_test_00032187.jpg +Places365_test_00032193.jpg +Places365_test_00032206.jpg +Places365_test_00032209.jpg +Places365_test_00032262.jpg +Places365_test_00032269.jpg +Places365_test_00032290.jpg +Places365_test_00032303.jpg +Places365_test_00032306.jpg +Places365_test_00032320.jpg +Places365_test_00032329.jpg +Places365_test_00032340.jpg +Places365_test_00032352.jpg +Places365_test_00032355.jpg +Places365_test_00032361.jpg +Places365_test_00032365.jpg +Places365_test_00032373.jpg +Places365_test_00032382.jpg +Places365_test_00032386.jpg +Places365_test_00032411.jpg +Places365_test_00032434.jpg +Places365_test_00032435.jpg +Places365_test_00032467.jpg +Places365_test_00032468.jpg +Places365_test_00032483.jpg +Places365_test_00032484.jpg +Places365_test_00032498.jpg +Places365_test_00032499.jpg +Places365_test_00032502.jpg +Places365_test_00032529.jpg +Places365_test_00032575.jpg +Places365_test_00032591.jpg +Places365_test_00032598.jpg +Places365_test_00032601.jpg +Places365_test_00032608.jpg +Places365_test_00032626.jpg +Places365_test_00032650.jpg +Places365_test_00032655.jpg +Places365_test_00032661.jpg +Places365_test_00032676.jpg +Places365_test_00032682.jpg +Places365_test_00032708.jpg +Places365_test_00032735.jpg +Places365_test_00032742.jpg +Places365_test_00032764.jpg +Places365_test_00032776.jpg +Places365_test_00032777.jpg +Places365_test_00032795.jpg +Places365_test_00032796.jpg +Places365_test_00032808.jpg +Places365_test_00032818.jpg +Places365_test_00032819.jpg +Places365_test_00032832.jpg +Places365_test_00032850.jpg +Places365_test_00032851.jpg +Places365_test_00032852.jpg +Places365_test_00032877.jpg +Places365_test_00032880.jpg +Places365_test_00032903.jpg +Places365_test_00032920.jpg +Places365_test_00032930.jpg +Places365_test_00032949.jpg +Places365_test_00032953.jpg +Places365_test_00032957.jpg +Places365_test_00032975.jpg +Places365_test_00032981.jpg +Places365_test_00033001.jpg +Places365_test_00033003.jpg +Places365_test_00033007.jpg +Places365_test_00033022.jpg +Places365_test_00033024.jpg +Places365_test_00033038.jpg +Places365_test_00033050.jpg +Places365_test_00033059.jpg +Places365_test_00033064.jpg +Places365_test_00033077.jpg +Places365_test_00033079.jpg +Places365_test_00033084.jpg +Places365_test_00033090.jpg +Places365_test_00033102.jpg +Places365_test_00033105.jpg +Places365_test_00033110.jpg +Places365_test_00033125.jpg +Places365_test_00033126.jpg +Places365_test_00033136.jpg +Places365_test_00033138.jpg +Places365_test_00033140.jpg +Places365_test_00033143.jpg +Places365_test_00033144.jpg +Places365_test_00033150.jpg +Places365_test_00033155.jpg +Places365_test_00033173.jpg +Places365_test_00033180.jpg +Places365_test_00033184.jpg +Places365_test_00033215.jpg +Places365_test_00033216.jpg +Places365_test_00033229.jpg +Places365_test_00033232.jpg +Places365_test_00033270.jpg +Places365_test_00033273.jpg +Places365_test_00033276.jpg +Places365_test_00033279.jpg +Places365_test_00033283.jpg +Places365_test_00033294.jpg +Places365_test_00033301.jpg +Places365_test_00033307.jpg +Places365_test_00033311.jpg +Places365_test_00033324.jpg 
+Places365_test_00033328.jpg +Places365_test_00033332.jpg +Places365_test_00033345.jpg +Places365_test_00033348.jpg +Places365_test_00033358.jpg +Places365_test_00033360.jpg +Places365_test_00033376.jpg +Places365_test_00033407.jpg +Places365_test_00033408.jpg +Places365_test_00033418.jpg +Places365_test_00033421.jpg +Places365_test_00033427.jpg +Places365_test_00033428.jpg +Places365_test_00033434.jpg +Places365_test_00033475.jpg +Places365_test_00033492.jpg +Places365_test_00033503.jpg +Places365_test_00033528.jpg +Places365_test_00033547.jpg +Places365_test_00033556.jpg +Places365_test_00033562.jpg +Places365_test_00033572.jpg +Places365_test_00033579.jpg +Places365_test_00033588.jpg +Places365_test_00033626.jpg +Places365_test_00033643.jpg +Places365_test_00033660.jpg +Places365_test_00033668.jpg +Places365_test_00033669.jpg +Places365_test_00033671.jpg +Places365_test_00033682.jpg +Places365_test_00033683.jpg +Places365_test_00033696.jpg +Places365_test_00033705.jpg +Places365_test_00033708.jpg +Places365_test_00033710.jpg +Places365_test_00033711.jpg +Places365_test_00033712.jpg +Places365_test_00033744.jpg +Places365_test_00033772.jpg +Places365_test_00033778.jpg +Places365_test_00033779.jpg +Places365_test_00033790.jpg +Places365_test_00033811.jpg +Places365_test_00033821.jpg +Places365_test_00033839.jpg +Places365_test_00033842.jpg +Places365_test_00033853.jpg +Places365_test_00033862.jpg +Places365_test_00033865.jpg +Places365_test_00033895.jpg +Places365_test_00033919.jpg +Places365_test_00033929.jpg +Places365_test_00033947.jpg +Places365_test_00033961.jpg +Places365_test_00033970.jpg +Places365_test_00033981.jpg +Places365_test_00034030.jpg +Places365_test_00034033.jpg +Places365_test_00034053.jpg +Places365_test_00034060.jpg +Places365_test_00034066.jpg +Places365_test_00034128.jpg +Places365_test_00034131.jpg +Places365_test_00034137.jpg +Places365_test_00034150.jpg +Places365_test_00034152.jpg +Places365_test_00034153.jpg +Places365_test_00034192.jpg +Places365_test_00034212.jpg +Places365_test_00034223.jpg +Places365_test_00034228.jpg +Places365_test_00034238.jpg +Places365_test_00034246.jpg +Places365_test_00034247.jpg +Places365_test_00034259.jpg +Places365_test_00034290.jpg +Places365_test_00034317.jpg +Places365_test_00034342.jpg +Places365_test_00034344.jpg +Places365_test_00034347.jpg +Places365_test_00034353.jpg +Places365_test_00034363.jpg +Places365_test_00034371.jpg +Places365_test_00034374.jpg +Places365_test_00034379.jpg +Places365_test_00034395.jpg +Places365_test_00034406.jpg +Places365_test_00034410.jpg +Places365_test_00034436.jpg +Places365_test_00034446.jpg +Places365_test_00034460.jpg +Places365_test_00034461.jpg +Places365_test_00034466.jpg +Places365_test_00034468.jpg +Places365_test_00034483.jpg +Places365_test_00034486.jpg +Places365_test_00034500.jpg +Places365_test_00034504.jpg +Places365_test_00034508.jpg +Places365_test_00034522.jpg +Places365_test_00034535.jpg +Places365_test_00034542.jpg +Places365_test_00034548.jpg +Places365_test_00034553.jpg +Places365_test_00034568.jpg +Places365_test_00034573.jpg +Places365_test_00034574.jpg +Places365_test_00034595.jpg +Places365_test_00034599.jpg +Places365_test_00034606.jpg +Places365_test_00034627.jpg +Places365_test_00034639.jpg +Places365_test_00034649.jpg +Places365_test_00034662.jpg +Places365_test_00034671.jpg +Places365_test_00034695.jpg +Places365_test_00034697.jpg +Places365_test_00034711.jpg +Places365_test_00034713.jpg +Places365_test_00034726.jpg +Places365_test_00034763.jpg 
+Places365_test_00034765.jpg +Places365_test_00034800.jpg +Places365_test_00034823.jpg +Places365_test_00034827.jpg +Places365_test_00034850.jpg +Places365_test_00034862.jpg +Places365_test_00034881.jpg +Places365_test_00034886.jpg +Places365_test_00034908.jpg +Places365_test_00034919.jpg +Places365_test_00034935.jpg +Places365_test_00034944.jpg +Places365_test_00034952.jpg +Places365_test_00034958.jpg +Places365_test_00034963.jpg +Places365_test_00034966.jpg +Places365_test_00034967.jpg +Places365_test_00034991.jpg +Places365_test_00034994.jpg +Places365_test_00034995.jpg +Places365_test_00035025.jpg +Places365_test_00035028.jpg +Places365_test_00035038.jpg +Places365_test_00035056.jpg +Places365_test_00035060.jpg +Places365_test_00035077.jpg +Places365_test_00035092.jpg +Places365_test_00035096.jpg +Places365_test_00035108.jpg +Places365_test_00035111.jpg +Places365_test_00035112.jpg +Places365_test_00035147.jpg +Places365_test_00035150.jpg +Places365_test_00035171.jpg +Places365_test_00035191.jpg +Places365_test_00035206.jpg +Places365_test_00035208.jpg +Places365_test_00035221.jpg +Places365_test_00035225.jpg +Places365_test_00035236.jpg +Places365_test_00035256.jpg +Places365_test_00035266.jpg +Places365_test_00035267.jpg +Places365_test_00035279.jpg +Places365_test_00035295.jpg +Places365_test_00035303.jpg +Places365_test_00035326.jpg +Places365_test_00035345.jpg +Places365_test_00035350.jpg +Places365_test_00035351.jpg +Places365_test_00035365.jpg +Places365_test_00035370.jpg +Places365_test_00035383.jpg +Places365_test_00035386.jpg +Places365_test_00035394.jpg +Places365_test_00035396.jpg +Places365_test_00035397.jpg +Places365_test_00035414.jpg +Places365_test_00035427.jpg +Places365_test_00035471.jpg +Places365_test_00035484.jpg +Places365_test_00035507.jpg +Places365_test_00035522.jpg +Places365_test_00035525.jpg +Places365_test_00035526.jpg +Places365_test_00035537.jpg +Places365_test_00035541.jpg +Places365_test_00035542.jpg +Places365_test_00035554.jpg +Places365_test_00035560.jpg +Places365_test_00035566.jpg +Places365_test_00035575.jpg +Places365_test_00035586.jpg +Places365_test_00035621.jpg +Places365_test_00035638.jpg +Places365_test_00035646.jpg +Places365_test_00035670.jpg +Places365_test_00035690.jpg +Places365_test_00035695.jpg +Places365_test_00035698.jpg +Places365_test_00035722.jpg +Places365_test_00035730.jpg +Places365_test_00035736.jpg +Places365_test_00035737.jpg +Places365_test_00035751.jpg +Places365_test_00035756.jpg +Places365_test_00035779.jpg +Places365_test_00035782.jpg +Places365_test_00035786.jpg +Places365_test_00035812.jpg +Places365_test_00035823.jpg +Places365_test_00035828.jpg +Places365_test_00035829.jpg +Places365_test_00035858.jpg +Places365_test_00035872.jpg +Places365_test_00035877.jpg +Places365_test_00035895.jpg +Places365_test_00035903.jpg +Places365_test_00035906.jpg +Places365_test_00035956.jpg +Places365_test_00035979.jpg +Places365_test_00035992.jpg +Places365_test_00036005.jpg +Places365_test_00036008.jpg +Places365_test_00036029.jpg +Places365_test_00036049.jpg +Places365_test_00036055.jpg +Places365_test_00036065.jpg +Places365_test_00036082.jpg +Places365_test_00036085.jpg +Places365_test_00036111.jpg +Places365_test_00036113.jpg +Places365_test_00036114.jpg +Places365_test_00036118.jpg +Places365_test_00036144.jpg +Places365_test_00036146.jpg +Places365_test_00036153.jpg +Places365_test_00036167.jpg +Places365_test_00036177.jpg +Places365_test_00036179.jpg +Places365_test_00036190.jpg +Places365_test_00036195.jpg 
+Places365_test_00036199.jpg +Places365_test_00036204.jpg +Places365_test_00036216.jpg +Places365_test_00036225.jpg +Places365_test_00036244.jpg +Places365_test_00036249.jpg +Places365_test_00036253.jpg +Places365_test_00036258.jpg +Places365_test_00036270.jpg +Places365_test_00036272.jpg +Places365_test_00036282.jpg +Places365_test_00036285.jpg +Places365_test_00036291.jpg +Places365_test_00036292.jpg +Places365_test_00036309.jpg +Places365_test_00036320.jpg +Places365_test_00036330.jpg +Places365_test_00036333.jpg +Places365_test_00036349.jpg +Places365_test_00036350.jpg +Places365_test_00036373.jpg +Places365_test_00036396.jpg +Places365_test_00036427.jpg +Places365_test_00036442.jpg +Places365_test_00036487.jpg +Places365_test_00036488.jpg +Places365_test_00036493.jpg +Places365_test_00036495.jpg +Places365_test_00036501.jpg +Places365_test_00036518.jpg +Places365_test_00036543.jpg +Places365_test_00036544.jpg +Places365_test_00036551.jpg +Places365_test_00036559.jpg +Places365_test_00036602.jpg +Places365_test_00036605.jpg +Places365_test_00036606.jpg +Places365_test_00036630.jpg +Places365_test_00036642.jpg +Places365_test_00036645.jpg +Places365_test_00036651.jpg +Places365_test_00036694.jpg +Places365_test_00036696.jpg +Places365_test_00036699.jpg +Places365_test_00036710.jpg +Places365_test_00036718.jpg +Places365_test_00036719.jpg +Places365_test_00036735.jpg +Places365_test_00036738.jpg +Places365_test_00036762.jpg +Places365_test_00036790.jpg +Places365_test_00036811.jpg +Places365_test_00036812.jpg +Places365_test_00036814.jpg +Places365_test_00036818.jpg +Places365_test_00036821.jpg +Places365_test_00036850.jpg +Places365_test_00036868.jpg +Places365_test_00036888.jpg +Places365_test_00036901.jpg +Places365_test_00036908.jpg +Places365_test_00036915.jpg +Places365_test_00036921.jpg +Places365_test_00036932.jpg +Places365_test_00036938.jpg +Places365_test_00036944.jpg +Places365_test_00036946.jpg +Places365_test_00036947.jpg +Places365_test_00036960.jpg +Places365_test_00036969.jpg +Places365_test_00036974.jpg +Places365_test_00036977.jpg +Places365_test_00036984.jpg +Places365_test_00036989.jpg +Places365_test_00036992.jpg +Places365_test_00036993.jpg +Places365_test_00037014.jpg +Places365_test_00037039.jpg +Places365_test_00037045.jpg +Places365_test_00037069.jpg +Places365_test_00037078.jpg +Places365_test_00037087.jpg +Places365_test_00037095.jpg +Places365_test_00037097.jpg +Places365_test_00037098.jpg +Places365_test_00037106.jpg +Places365_test_00037109.jpg +Places365_test_00037112.jpg +Places365_test_00037117.jpg +Places365_test_00037129.jpg +Places365_test_00037162.jpg +Places365_test_00037193.jpg +Places365_test_00037232.jpg +Places365_test_00037245.jpg +Places365_test_00037249.jpg +Places365_test_00037288.jpg +Places365_test_00037302.jpg +Places365_test_00037308.jpg +Places365_test_00037309.jpg +Places365_test_00037310.jpg +Places365_test_00037317.jpg +Places365_test_00037332.jpg +Places365_test_00037347.jpg +Places365_test_00037352.jpg +Places365_test_00037371.jpg +Places365_test_00037402.jpg +Places365_test_00037420.jpg +Places365_test_00037421.jpg +Places365_test_00037424.jpg +Places365_test_00037444.jpg +Places365_test_00037446.jpg +Places365_test_00037464.jpg +Places365_test_00037468.jpg +Places365_test_00037472.jpg +Places365_test_00037482.jpg +Places365_test_00037483.jpg +Places365_test_00037509.jpg +Places365_test_00037515.jpg +Places365_test_00037532.jpg +Places365_test_00037534.jpg +Places365_test_00037538.jpg +Places365_test_00037539.jpg 
+Places365_test_00037561.jpg +Places365_test_00037576.jpg +Places365_test_00037591.jpg +Places365_test_00037599.jpg +Places365_test_00037620.jpg +Places365_test_00037626.jpg +Places365_test_00037633.jpg +Places365_test_00037646.jpg +Places365_test_00037649.jpg +Places365_test_00037667.jpg +Places365_test_00037672.jpg +Places365_test_00037674.jpg +Places365_test_00037679.jpg +Places365_test_00037680.jpg +Places365_test_00037698.jpg +Places365_test_00037763.jpg +Places365_test_00037764.jpg +Places365_test_00037778.jpg +Places365_test_00037783.jpg +Places365_test_00037786.jpg +Places365_test_00037794.jpg +Places365_test_00037808.jpg +Places365_test_00037809.jpg +Places365_test_00037826.jpg +Places365_test_00037838.jpg +Places365_test_00037848.jpg +Places365_test_00037860.jpg +Places365_test_00037868.jpg +Places365_test_00037876.jpg +Places365_test_00037881.jpg +Places365_test_00037883.jpg +Places365_test_00037932.jpg +Places365_test_00037944.jpg +Places365_test_00037951.jpg +Places365_test_00037964.jpg +Places365_test_00037974.jpg +Places365_test_00037976.jpg +Places365_test_00037980.jpg +Places365_test_00037997.jpg +Places365_test_00038001.jpg +Places365_test_00038005.jpg +Places365_test_00038032.jpg +Places365_test_00038063.jpg +Places365_test_00038077.jpg +Places365_test_00038093.jpg +Places365_test_00038100.jpg +Places365_test_00038104.jpg +Places365_test_00038119.jpg +Places365_test_00038134.jpg +Places365_test_00038139.jpg +Places365_test_00038140.jpg +Places365_test_00038153.jpg +Places365_test_00038170.jpg +Places365_test_00038172.jpg +Places365_test_00038175.jpg +Places365_test_00038178.jpg +Places365_test_00038181.jpg +Places365_test_00038188.jpg +Places365_test_00038190.jpg +Places365_test_00038195.jpg +Places365_test_00038202.jpg +Places365_test_00038203.jpg +Places365_test_00038207.jpg +Places365_test_00038215.jpg +Places365_test_00038225.jpg +Places365_test_00038249.jpg +Places365_test_00038263.jpg +Places365_test_00038264.jpg +Places365_test_00038278.jpg +Places365_test_00038287.jpg +Places365_test_00038306.jpg +Places365_test_00038318.jpg +Places365_test_00038346.jpg +Places365_test_00038370.jpg +Places365_test_00038373.jpg +Places365_test_00038376.jpg +Places365_test_00038384.jpg +Places365_test_00038389.jpg +Places365_test_00038398.jpg +Places365_test_00038412.jpg +Places365_test_00038431.jpg +Places365_test_00038433.jpg +Places365_test_00038434.jpg +Places365_test_00038445.jpg +Places365_test_00038492.jpg +Places365_test_00038502.jpg +Places365_test_00038503.jpg +Places365_test_00038512.jpg +Places365_test_00038513.jpg +Places365_test_00038517.jpg +Places365_test_00038527.jpg +Places365_test_00038528.jpg +Places365_test_00038537.jpg +Places365_test_00038550.jpg +Places365_test_00038574.jpg +Places365_test_00038584.jpg +Places365_test_00038610.jpg +Places365_test_00038622.jpg +Places365_test_00038634.jpg +Places365_test_00038675.jpg +Places365_test_00038685.jpg +Places365_test_00038688.jpg +Places365_test_00038730.jpg +Places365_test_00038736.jpg +Places365_test_00038741.jpg +Places365_test_00038752.jpg +Places365_test_00038781.jpg +Places365_test_00038786.jpg +Places365_test_00038793.jpg +Places365_test_00038816.jpg +Places365_test_00038817.jpg +Places365_test_00038818.jpg +Places365_test_00038845.jpg +Places365_test_00038865.jpg +Places365_test_00038885.jpg +Places365_test_00038887.jpg +Places365_test_00038905.jpg +Places365_test_00038910.jpg +Places365_test_00038914.jpg +Places365_test_00038918.jpg +Places365_test_00038954.jpg +Places365_test_00038965.jpg 
+Places365_test_00038970.jpg +Places365_test_00038977.jpg +Places365_test_00038981.jpg +Places365_test_00039005.jpg +Places365_test_00039009.jpg +Places365_test_00039012.jpg +Places365_test_00039020.jpg +Places365_test_00039023.jpg +Places365_test_00039026.jpg +Places365_test_00039028.jpg +Places365_test_00039041.jpg +Places365_test_00039045.jpg +Places365_test_00039047.jpg +Places365_test_00039064.jpg +Places365_test_00039077.jpg +Places365_test_00039080.jpg +Places365_test_00039087.jpg +Places365_test_00039089.jpg +Places365_test_00039092.jpg +Places365_test_00039099.jpg +Places365_test_00039113.jpg +Places365_test_00039116.jpg +Places365_test_00039145.jpg +Places365_test_00039153.jpg +Places365_test_00039179.jpg +Places365_test_00039191.jpg +Places365_test_00039197.jpg +Places365_test_00039204.jpg +Places365_test_00039210.jpg +Places365_test_00039213.jpg +Places365_test_00039218.jpg +Places365_test_00039234.jpg +Places365_test_00039253.jpg +Places365_test_00039268.jpg +Places365_test_00039275.jpg +Places365_test_00039285.jpg +Places365_test_00039301.jpg +Places365_test_00039310.jpg +Places365_test_00039321.jpg +Places365_test_00039323.jpg +Places365_test_00039326.jpg +Places365_test_00039339.jpg +Places365_test_00039368.jpg +Places365_test_00039370.jpg +Places365_test_00039376.jpg +Places365_test_00039379.jpg +Places365_test_00039393.jpg +Places365_test_00039410.jpg +Places365_test_00039493.jpg +Places365_test_00039498.jpg +Places365_test_00039505.jpg +Places365_test_00039571.jpg +Places365_test_00039580.jpg +Places365_test_00039592.jpg +Places365_test_00039602.jpg +Places365_test_00039634.jpg +Places365_test_00039641.jpg +Places365_test_00039646.jpg +Places365_test_00039650.jpg +Places365_test_00039669.jpg +Places365_test_00039673.jpg +Places365_test_00039676.jpg +Places365_test_00039694.jpg +Places365_test_00039697.jpg +Places365_test_00039699.jpg +Places365_test_00039704.jpg +Places365_test_00039706.jpg +Places365_test_00039711.jpg +Places365_test_00039721.jpg +Places365_test_00039739.jpg +Places365_test_00039765.jpg +Places365_test_00039766.jpg +Places365_test_00039769.jpg +Places365_test_00039772.jpg +Places365_test_00039785.jpg +Places365_test_00039787.jpg +Places365_test_00039794.jpg +Places365_test_00039805.jpg +Places365_test_00039814.jpg +Places365_test_00039815.jpg +Places365_test_00039826.jpg +Places365_test_00039842.jpg +Places365_test_00039866.jpg +Places365_test_00039879.jpg +Places365_test_00039894.jpg +Places365_test_00039901.jpg +Places365_test_00039921.jpg +Places365_test_00039933.jpg +Places365_test_00039934.jpg +Places365_test_00039937.jpg +Places365_test_00039942.jpg +Places365_test_00039953.jpg +Places365_test_00039985.jpg +Places365_test_00040030.jpg +Places365_test_00040033.jpg +Places365_test_00040038.jpg +Places365_test_00040067.jpg +Places365_test_00040073.jpg +Places365_test_00040115.jpg +Places365_test_00040122.jpg +Places365_test_00040135.jpg +Places365_test_00040140.jpg +Places365_test_00040157.jpg +Places365_test_00040160.jpg +Places365_test_00040166.jpg +Places365_test_00040176.jpg +Places365_test_00040183.jpg +Places365_test_00040187.jpg +Places365_test_00040200.jpg +Places365_test_00040203.jpg +Places365_test_00040206.jpg +Places365_test_00040216.jpg +Places365_test_00040217.jpg +Places365_test_00040218.jpg +Places365_test_00040231.jpg +Places365_test_00040240.jpg +Places365_test_00040267.jpg +Places365_test_00040272.jpg +Places365_test_00040274.jpg +Places365_test_00040277.jpg +Places365_test_00040313.jpg +Places365_test_00040314.jpg 
+Places365_test_00040315.jpg +Places365_test_00040328.jpg +Places365_test_00040330.jpg +Places365_test_00040344.jpg +Places365_test_00040361.jpg +Places365_test_00040366.jpg +Places365_test_00040370.jpg +Places365_test_00040375.jpg +Places365_test_00040387.jpg +Places365_test_00040394.jpg +Places365_test_00040397.jpg +Places365_test_00040401.jpg +Places365_test_00040415.jpg +Places365_test_00040433.jpg +Places365_test_00040436.jpg +Places365_test_00040446.jpg +Places365_test_00040465.jpg +Places365_test_00040471.jpg +Places365_test_00040479.jpg +Places365_test_00040487.jpg +Places365_test_00040489.jpg +Places365_test_00040492.jpg +Places365_test_00040507.jpg +Places365_test_00040523.jpg +Places365_test_00040536.jpg +Places365_test_00040547.jpg +Places365_test_00040558.jpg +Places365_test_00040560.jpg +Places365_test_00040580.jpg +Places365_test_00040584.jpg +Places365_test_00040588.jpg +Places365_test_00040590.jpg +Places365_test_00040619.jpg +Places365_test_00040624.jpg +Places365_test_00040631.jpg +Places365_test_00040644.jpg +Places365_test_00040655.jpg +Places365_test_00040670.jpg +Places365_test_00040671.jpg +Places365_test_00040672.jpg +Places365_test_00040679.jpg +Places365_test_00040689.jpg +Places365_test_00040699.jpg +Places365_test_00040700.jpg +Places365_test_00040721.jpg +Places365_test_00040723.jpg +Places365_test_00040729.jpg +Places365_test_00040737.jpg +Places365_test_00040745.jpg +Places365_test_00040755.jpg +Places365_test_00040769.jpg +Places365_test_00040776.jpg +Places365_test_00040777.jpg +Places365_test_00040781.jpg +Places365_test_00040792.jpg +Places365_test_00040797.jpg +Places365_test_00040798.jpg +Places365_test_00040801.jpg +Places365_test_00040830.jpg +Places365_test_00040856.jpg +Places365_test_00040864.jpg +Places365_test_00040868.jpg +Places365_test_00040883.jpg +Places365_test_00040896.jpg +Places365_test_00040912.jpg +Places365_test_00040924.jpg +Places365_test_00040927.jpg +Places365_test_00040946.jpg +Places365_test_00040973.jpg +Places365_test_00040982.jpg +Places365_test_00040998.jpg +Places365_test_00041000.jpg +Places365_test_00041002.jpg +Places365_test_00041006.jpg +Places365_test_00041009.jpg +Places365_test_00041014.jpg +Places365_test_00041017.jpg +Places365_test_00041022.jpg +Places365_test_00041027.jpg +Places365_test_00041037.jpg +Places365_test_00041039.jpg +Places365_test_00041051.jpg +Places365_test_00041071.jpg +Places365_test_00041084.jpg +Places365_test_00041103.jpg +Places365_test_00041105.jpg +Places365_test_00041145.jpg +Places365_test_00041152.jpg +Places365_test_00041164.jpg +Places365_test_00041180.jpg +Places365_test_00041184.jpg +Places365_test_00041204.jpg +Places365_test_00041215.jpg +Places365_test_00041218.jpg +Places365_test_00041222.jpg +Places365_test_00041224.jpg +Places365_test_00041226.jpg +Places365_test_00041232.jpg +Places365_test_00041237.jpg +Places365_test_00041243.jpg +Places365_test_00041248.jpg +Places365_test_00041249.jpg +Places365_test_00041257.jpg +Places365_test_00041271.jpg +Places365_test_00041275.jpg +Places365_test_00041280.jpg +Places365_test_00041284.jpg +Places365_test_00041289.jpg +Places365_test_00041293.jpg +Places365_test_00041303.jpg +Places365_test_00041309.jpg +Places365_test_00041314.jpg +Places365_test_00041340.jpg +Places365_test_00041348.jpg +Places365_test_00041364.jpg +Places365_test_00041421.jpg +Places365_test_00041462.jpg +Places365_test_00041468.jpg +Places365_test_00041513.jpg +Places365_test_00041522.jpg +Places365_test_00041524.jpg +Places365_test_00041542.jpg 
+Places365_test_00041551.jpg +Places365_test_00041555.jpg +Places365_test_00041571.jpg +Places365_test_00041578.jpg +Places365_test_00041584.jpg +Places365_test_00041594.jpg +Places365_test_00041607.jpg +Places365_test_00041614.jpg +Places365_test_00041619.jpg +Places365_test_00041625.jpg +Places365_test_00041627.jpg +Places365_test_00041628.jpg +Places365_test_00041637.jpg +Places365_test_00041640.jpg +Places365_test_00041659.jpg +Places365_test_00041665.jpg +Places365_test_00041673.jpg +Places365_test_00041678.jpg +Places365_test_00041680.jpg +Places365_test_00041683.jpg +Places365_test_00041684.jpg +Places365_test_00041691.jpg +Places365_test_00041723.jpg +Places365_test_00041735.jpg +Places365_test_00041737.jpg +Places365_test_00041758.jpg +Places365_test_00041770.jpg +Places365_test_00041819.jpg +Places365_test_00041828.jpg +Places365_test_00041829.jpg +Places365_test_00041840.jpg +Places365_test_00041846.jpg +Places365_test_00041860.jpg +Places365_test_00041878.jpg +Places365_test_00041891.jpg +Places365_test_00041905.jpg +Places365_test_00041906.jpg +Places365_test_00041910.jpg +Places365_test_00041956.jpg +Places365_test_00041957.jpg +Places365_test_00041962.jpg +Places365_test_00041966.jpg +Places365_test_00042004.jpg +Places365_test_00042013.jpg +Places365_test_00042021.jpg +Places365_test_00042033.jpg +Places365_test_00042046.jpg +Places365_test_00042067.jpg +Places365_test_00042068.jpg +Places365_test_00042079.jpg +Places365_test_00042086.jpg +Places365_test_00042091.jpg +Places365_test_00042099.jpg +Places365_test_00042106.jpg +Places365_test_00042135.jpg +Places365_test_00042136.jpg +Places365_test_00042162.jpg +Places365_test_00042163.jpg +Places365_test_00042173.jpg +Places365_test_00042174.jpg +Places365_test_00042187.jpg +Places365_test_00042193.jpg +Places365_test_00042211.jpg +Places365_test_00042228.jpg +Places365_test_00042265.jpg +Places365_test_00042266.jpg +Places365_test_00042274.jpg +Places365_test_00042278.jpg +Places365_test_00042280.jpg +Places365_test_00042300.jpg +Places365_test_00042302.jpg +Places365_test_00042304.jpg +Places365_test_00042323.jpg +Places365_test_00042329.jpg +Places365_test_00042331.jpg +Places365_test_00042337.jpg +Places365_test_00042351.jpg +Places365_test_00042362.jpg +Places365_test_00042363.jpg +Places365_test_00042366.jpg +Places365_test_00042368.jpg +Places365_test_00042379.jpg +Places365_test_00042424.jpg +Places365_test_00042459.jpg +Places365_test_00042482.jpg +Places365_test_00042484.jpg +Places365_test_00042500.jpg +Places365_test_00042502.jpg +Places365_test_00042513.jpg +Places365_test_00042515.jpg +Places365_test_00042519.jpg +Places365_test_00042524.jpg +Places365_test_00042551.jpg +Places365_test_00042557.jpg +Places365_test_00042564.jpg +Places365_test_00042567.jpg +Places365_test_00042575.jpg +Places365_test_00042580.jpg +Places365_test_00042581.jpg +Places365_test_00042585.jpg +Places365_test_00042603.jpg +Places365_test_00042604.jpg +Places365_test_00042609.jpg +Places365_test_00042612.jpg +Places365_test_00042635.jpg +Places365_test_00042638.jpg +Places365_test_00042645.jpg +Places365_test_00042651.jpg +Places365_test_00042654.jpg +Places365_test_00042666.jpg +Places365_test_00042700.jpg +Places365_test_00042704.jpg +Places365_test_00042725.jpg +Places365_test_00042727.jpg +Places365_test_00042755.jpg +Places365_test_00042769.jpg +Places365_test_00042774.jpg +Places365_test_00042779.jpg +Places365_test_00042786.jpg +Places365_test_00042787.jpg +Places365_test_00042798.jpg +Places365_test_00042806.jpg 
+Places365_test_00042807.jpg +Places365_test_00042816.jpg +Places365_test_00042847.jpg +Places365_test_00042853.jpg +Places365_test_00042861.jpg +Places365_test_00042866.jpg +Places365_test_00042867.jpg +Places365_test_00042868.jpg +Places365_test_00042878.jpg +Places365_test_00042889.jpg +Places365_test_00042894.jpg +Places365_test_00042895.jpg +Places365_test_00042917.jpg +Places365_test_00042924.jpg +Places365_test_00042947.jpg +Places365_test_00042948.jpg +Places365_test_00042955.jpg +Places365_test_00042970.jpg +Places365_test_00042978.jpg +Places365_test_00042981.jpg +Places365_test_00042991.jpg +Places365_test_00043003.jpg +Places365_test_00043012.jpg +Places365_test_00043015.jpg +Places365_test_00043031.jpg +Places365_test_00043032.jpg +Places365_test_00043064.jpg +Places365_test_00043068.jpg +Places365_test_00043091.jpg +Places365_test_00043096.jpg +Places365_test_00043107.jpg +Places365_test_00043125.jpg +Places365_test_00043133.jpg +Places365_test_00043138.jpg +Places365_test_00043157.jpg +Places365_test_00043167.jpg +Places365_test_00043181.jpg +Places365_test_00043194.jpg +Places365_test_00043195.jpg +Places365_test_00043196.jpg +Places365_test_00043219.jpg +Places365_test_00043221.jpg +Places365_test_00043232.jpg +Places365_test_00043239.jpg +Places365_test_00043244.jpg +Places365_test_00043253.jpg +Places365_test_00043279.jpg +Places365_test_00043287.jpg +Places365_test_00043306.jpg +Places365_test_00043311.jpg +Places365_test_00043323.jpg +Places365_test_00043331.jpg +Places365_test_00043337.jpg +Places365_test_00043348.jpg +Places365_test_00043349.jpg +Places365_test_00043359.jpg +Places365_test_00043365.jpg +Places365_test_00043366.jpg +Places365_test_00043386.jpg +Places365_test_00043390.jpg +Places365_test_00043395.jpg +Places365_test_00043402.jpg +Places365_test_00043413.jpg +Places365_test_00043423.jpg +Places365_test_00043424.jpg +Places365_test_00043425.jpg +Places365_test_00043431.jpg +Places365_test_00043475.jpg +Places365_test_00043485.jpg +Places365_test_00043490.jpg +Places365_test_00043498.jpg +Places365_test_00043507.jpg +Places365_test_00043521.jpg +Places365_test_00043549.jpg +Places365_test_00043552.jpg +Places365_test_00043556.jpg +Places365_test_00043561.jpg +Places365_test_00043562.jpg +Places365_test_00043566.jpg +Places365_test_00043574.jpg +Places365_test_00043586.jpg +Places365_test_00043599.jpg +Places365_test_00043606.jpg +Places365_test_00043614.jpg +Places365_test_00043653.jpg +Places365_test_00043656.jpg +Places365_test_00043670.jpg +Places365_test_00043671.jpg +Places365_test_00043683.jpg +Places365_test_00043684.jpg +Places365_test_00043694.jpg +Places365_test_00043695.jpg +Places365_test_00043722.jpg +Places365_test_00043725.jpg +Places365_test_00043750.jpg +Places365_test_00043774.jpg +Places365_test_00043779.jpg +Places365_test_00043802.jpg +Places365_test_00043812.jpg +Places365_test_00043814.jpg +Places365_test_00043827.jpg +Places365_test_00043831.jpg +Places365_test_00043837.jpg +Places365_test_00043848.jpg +Places365_test_00043853.jpg +Places365_test_00043859.jpg +Places365_test_00043865.jpg +Places365_test_00043866.jpg +Places365_test_00043897.jpg +Places365_test_00043903.jpg +Places365_test_00043907.jpg +Places365_test_00043909.jpg +Places365_test_00043937.jpg +Places365_test_00043941.jpg +Places365_test_00043946.jpg +Places365_test_00043952.jpg +Places365_test_00043965.jpg +Places365_test_00043971.jpg +Places365_test_00043979.jpg +Places365_test_00043991.jpg +Places365_test_00043993.jpg +Places365_test_00043994.jpg 
+Places365_test_00043998.jpg +Places365_test_00043999.jpg +Places365_test_00044006.jpg +Places365_test_00044010.jpg +Places365_test_00044026.jpg +Places365_test_00044031.jpg +Places365_test_00044043.jpg +Places365_test_00044056.jpg +Places365_test_00044074.jpg +Places365_test_00044105.jpg +Places365_test_00044135.jpg +Places365_test_00044138.jpg +Places365_test_00044139.jpg +Places365_test_00044152.jpg +Places365_test_00044166.jpg +Places365_test_00044170.jpg +Places365_test_00044178.jpg +Places365_test_00044191.jpg +Places365_test_00044197.jpg +Places365_test_00044208.jpg +Places365_test_00044211.jpg +Places365_test_00044231.jpg +Places365_test_00044256.jpg +Places365_test_00044262.jpg +Places365_test_00044278.jpg +Places365_test_00044287.jpg +Places365_test_00044290.jpg +Places365_test_00044291.jpg +Places365_test_00044292.jpg +Places365_test_00044294.jpg +Places365_test_00044296.jpg +Places365_test_00044300.jpg +Places365_test_00044321.jpg +Places365_test_00044322.jpg +Places365_test_00044329.jpg +Places365_test_00044356.jpg +Places365_test_00044367.jpg +Places365_test_00044405.jpg +Places365_test_00044454.jpg +Places365_test_00044458.jpg +Places365_test_00044462.jpg +Places365_test_00044480.jpg +Places365_test_00044481.jpg +Places365_test_00044488.jpg +Places365_test_00044491.jpg +Places365_test_00044512.jpg +Places365_test_00044520.jpg +Places365_test_00044521.jpg +Places365_test_00044530.jpg +Places365_test_00044551.jpg +Places365_test_00044561.jpg +Places365_test_00044584.jpg +Places365_test_00044586.jpg +Places365_test_00044606.jpg +Places365_test_00044631.jpg +Places365_test_00044634.jpg +Places365_test_00044644.jpg +Places365_test_00044649.jpg +Places365_test_00044676.jpg +Places365_test_00044705.jpg +Places365_test_00044713.jpg +Places365_test_00044724.jpg +Places365_test_00044730.jpg +Places365_test_00044748.jpg +Places365_test_00044770.jpg +Places365_test_00044772.jpg +Places365_test_00044794.jpg +Places365_test_00044803.jpg +Places365_test_00044825.jpg +Places365_test_00044838.jpg +Places365_test_00044847.jpg +Places365_test_00044870.jpg +Places365_test_00044871.jpg +Places365_test_00044894.jpg +Places365_test_00044908.jpg +Places365_test_00044925.jpg +Places365_test_00044941.jpg +Places365_test_00044942.jpg +Places365_test_00044945.jpg +Places365_test_00044964.jpg +Places365_test_00044966.jpg +Places365_test_00044971.jpg +Places365_test_00045012.jpg +Places365_test_00045020.jpg +Places365_test_00045027.jpg +Places365_test_00045029.jpg +Places365_test_00045036.jpg +Places365_test_00045039.jpg +Places365_test_00045044.jpg +Places365_test_00045084.jpg +Places365_test_00045100.jpg +Places365_test_00045116.jpg +Places365_test_00045138.jpg +Places365_test_00045144.jpg +Places365_test_00045163.jpg +Places365_test_00045165.jpg +Places365_test_00045166.jpg +Places365_test_00045190.jpg +Places365_test_00045194.jpg +Places365_test_00045197.jpg +Places365_test_00045199.jpg +Places365_test_00045203.jpg +Places365_test_00045205.jpg +Places365_test_00045221.jpg +Places365_test_00045225.jpg +Places365_test_00045227.jpg +Places365_test_00045228.jpg +Places365_test_00045233.jpg +Places365_test_00045247.jpg +Places365_test_00045248.jpg +Places365_test_00045250.jpg +Places365_test_00045281.jpg +Places365_test_00045311.jpg +Places365_test_00045312.jpg +Places365_test_00045320.jpg +Places365_test_00045322.jpg +Places365_test_00045329.jpg +Places365_test_00045346.jpg +Places365_test_00045353.jpg +Places365_test_00045361.jpg +Places365_test_00045367.jpg +Places365_test_00045368.jpg 
+Places365_test_00045396.jpg +Places365_test_00045400.jpg +Places365_test_00045408.jpg +Places365_test_00045426.jpg +Places365_test_00045432.jpg +Places365_test_00045476.jpg +Places365_test_00045479.jpg +Places365_test_00045490.jpg +Places365_test_00045506.jpg +Places365_test_00045513.jpg +Places365_test_00045529.jpg +Places365_test_00045544.jpg +Places365_test_00045552.jpg +Places365_test_00045569.jpg +Places365_test_00045573.jpg +Places365_test_00045587.jpg +Places365_test_00045593.jpg +Places365_test_00045596.jpg +Places365_test_00045613.jpg +Places365_test_00045634.jpg +Places365_test_00045648.jpg +Places365_test_00045673.jpg +Places365_test_00045697.jpg +Places365_test_00045700.jpg +Places365_test_00045713.jpg +Places365_test_00045724.jpg +Places365_test_00045731.jpg +Places365_test_00045741.jpg +Places365_test_00045752.jpg +Places365_test_00045786.jpg +Places365_test_00045801.jpg +Places365_test_00045809.jpg +Places365_test_00045842.jpg +Places365_test_00045849.jpg +Places365_test_00045852.jpg +Places365_test_00045862.jpg +Places365_test_00045867.jpg +Places365_test_00045876.jpg +Places365_test_00045879.jpg +Places365_test_00045904.jpg +Places365_test_00045916.jpg +Places365_test_00045929.jpg +Places365_test_00045938.jpg +Places365_test_00045950.jpg +Places365_test_00045965.jpg +Places365_test_00045969.jpg +Places365_test_00045973.jpg +Places365_test_00045984.jpg +Places365_test_00045999.jpg +Places365_test_00046020.jpg +Places365_test_00046031.jpg +Places365_test_00046041.jpg +Places365_test_00046052.jpg +Places365_test_00046072.jpg +Places365_test_00046074.jpg +Places365_test_00046087.jpg +Places365_test_00046100.jpg +Places365_test_00046111.jpg +Places365_test_00046121.jpg +Places365_test_00046143.jpg +Places365_test_00046152.jpg +Places365_test_00046155.jpg +Places365_test_00046166.jpg +Places365_test_00046175.jpg +Places365_test_00046210.jpg +Places365_test_00046213.jpg +Places365_test_00046239.jpg +Places365_test_00046297.jpg +Places365_test_00046307.jpg +Places365_test_00046330.jpg +Places365_test_00046342.jpg +Places365_test_00046343.jpg +Places365_test_00046356.jpg +Places365_test_00046371.jpg +Places365_test_00046373.jpg +Places365_test_00046386.jpg +Places365_test_00046405.jpg +Places365_test_00046440.jpg +Places365_test_00046442.jpg +Places365_test_00046484.jpg +Places365_test_00046488.jpg +Places365_test_00046493.jpg +Places365_test_00046497.jpg +Places365_test_00046501.jpg +Places365_test_00046506.jpg +Places365_test_00046517.jpg +Places365_test_00046530.jpg +Places365_test_00046544.jpg +Places365_test_00046545.jpg +Places365_test_00046550.jpg +Places365_test_00046552.jpg +Places365_test_00046579.jpg +Places365_test_00046583.jpg +Places365_test_00046610.jpg +Places365_test_00046617.jpg +Places365_test_00046622.jpg +Places365_test_00046632.jpg +Places365_test_00046637.jpg +Places365_test_00046670.jpg +Places365_test_00046709.jpg +Places365_test_00046714.jpg +Places365_test_00046722.jpg +Places365_test_00046744.jpg +Places365_test_00046760.jpg +Places365_test_00046762.jpg +Places365_test_00046765.jpg +Places365_test_00046770.jpg +Places365_test_00046793.jpg +Places365_test_00046795.jpg +Places365_test_00046798.jpg +Places365_test_00046799.jpg +Places365_test_00046807.jpg +Places365_test_00046825.jpg +Places365_test_00046827.jpg +Places365_test_00046834.jpg +Places365_test_00046861.jpg +Places365_test_00046864.jpg +Places365_test_00046870.jpg +Places365_test_00046893.jpg +Places365_test_00046896.jpg +Places365_test_00046898.jpg +Places365_test_00046908.jpg 
+Places365_test_00046916.jpg +Places365_test_00046917.jpg +Places365_test_00046922.jpg +Places365_test_00046926.jpg +Places365_test_00046932.jpg +Places365_test_00046936.jpg +Places365_test_00046946.jpg +Places365_test_00046969.jpg +Places365_test_00046972.jpg +Places365_test_00046979.jpg +Places365_test_00047014.jpg +Places365_test_00047050.jpg +Places365_test_00047071.jpg +Places365_test_00047097.jpg +Places365_test_00047105.jpg +Places365_test_00047110.jpg +Places365_test_00047117.jpg +Places365_test_00047123.jpg +Places365_test_00047140.jpg +Places365_test_00047150.jpg +Places365_test_00047154.jpg +Places365_test_00047178.jpg +Places365_test_00047187.jpg +Places365_test_00047188.jpg +Places365_test_00047189.jpg +Places365_test_00047201.jpg +Places365_test_00047210.jpg +Places365_test_00047216.jpg +Places365_test_00047230.jpg +Places365_test_00047240.jpg +Places365_test_00047246.jpg +Places365_test_00047251.jpg +Places365_test_00047253.jpg +Places365_test_00047261.jpg +Places365_test_00047270.jpg +Places365_test_00047273.jpg +Places365_test_00047278.jpg +Places365_test_00047288.jpg +Places365_test_00047300.jpg +Places365_test_00047422.jpg +Places365_test_00047444.jpg +Places365_test_00047450.jpg +Places365_test_00047464.jpg +Places365_test_00047471.jpg +Places365_test_00047501.jpg +Places365_test_00047509.jpg +Places365_test_00047523.jpg +Places365_test_00047524.jpg +Places365_test_00047529.jpg +Places365_test_00047530.jpg +Places365_test_00047541.jpg +Places365_test_00047556.jpg +Places365_test_00047568.jpg +Places365_test_00047575.jpg +Places365_test_00047604.jpg +Places365_test_00047626.jpg +Places365_test_00047632.jpg +Places365_test_00047645.jpg +Places365_test_00047659.jpg +Places365_test_00047694.jpg +Places365_test_00047710.jpg +Places365_test_00047712.jpg +Places365_test_00047713.jpg +Places365_test_00047740.jpg +Places365_test_00047741.jpg +Places365_test_00047748.jpg +Places365_test_00047749.jpg +Places365_test_00047764.jpg +Places365_test_00047775.jpg +Places365_test_00047776.jpg +Places365_test_00047778.jpg +Places365_test_00047781.jpg +Places365_test_00047793.jpg +Places365_test_00047794.jpg +Places365_test_00047820.jpg +Places365_test_00047832.jpg +Places365_test_00047833.jpg +Places365_test_00047838.jpg +Places365_test_00047848.jpg +Places365_test_00047854.jpg +Places365_test_00047857.jpg +Places365_test_00047859.jpg +Places365_test_00047872.jpg +Places365_test_00047881.jpg +Places365_test_00047895.jpg +Places365_test_00047904.jpg +Places365_test_00047930.jpg +Places365_test_00047935.jpg +Places365_test_00047941.jpg +Places365_test_00047980.jpg +Places365_test_00047983.jpg +Places365_test_00047994.jpg +Places365_test_00047999.jpg +Places365_test_00048022.jpg +Places365_test_00048037.jpg +Places365_test_00048055.jpg +Places365_test_00048081.jpg +Places365_test_00048088.jpg +Places365_test_00048108.jpg +Places365_test_00048112.jpg +Places365_test_00048113.jpg +Places365_test_00048120.jpg +Places365_test_00048122.jpg +Places365_test_00048146.jpg +Places365_test_00048156.jpg +Places365_test_00048169.jpg +Places365_test_00048182.jpg +Places365_test_00048190.jpg +Places365_test_00048196.jpg +Places365_test_00048203.jpg +Places365_test_00048206.jpg +Places365_test_00048211.jpg +Places365_test_00048212.jpg +Places365_test_00048216.jpg +Places365_test_00048217.jpg +Places365_test_00048218.jpg +Places365_test_00048226.jpg +Places365_test_00048231.jpg +Places365_test_00048239.jpg +Places365_test_00048254.jpg +Places365_test_00048296.jpg +Places365_test_00048303.jpg 
+Places365_test_00048310.jpg +Places365_test_00048311.jpg +Places365_test_00048317.jpg +Places365_test_00048332.jpg +Places365_test_00048347.jpg +Places365_test_00048351.jpg +Places365_test_00048385.jpg +Places365_test_00048402.jpg +Places365_test_00048421.jpg +Places365_test_00048436.jpg +Places365_test_00048450.jpg +Places365_test_00048475.jpg +Places365_test_00048479.jpg +Places365_test_00048482.jpg +Places365_test_00048492.jpg +Places365_test_00048498.jpg +Places365_test_00048507.jpg +Places365_test_00048512.jpg +Places365_test_00048518.jpg +Places365_test_00048532.jpg +Places365_test_00048546.jpg +Places365_test_00048551.jpg +Places365_test_00048584.jpg +Places365_test_00048593.jpg +Places365_test_00048596.jpg +Places365_test_00048609.jpg +Places365_test_00048631.jpg +Places365_test_00048646.jpg +Places365_test_00048650.jpg +Places365_test_00048651.jpg +Places365_test_00048658.jpg +Places365_test_00048677.jpg +Places365_test_00048686.jpg +Places365_test_00048695.jpg +Places365_test_00048696.jpg +Places365_test_00048705.jpg +Places365_test_00048716.jpg +Places365_test_00048748.jpg +Places365_test_00048754.jpg +Places365_test_00048757.jpg +Places365_test_00048765.jpg +Places365_test_00048769.jpg +Places365_test_00048779.jpg +Places365_test_00048781.jpg +Places365_test_00048810.jpg +Places365_test_00048817.jpg +Places365_test_00048854.jpg +Places365_test_00048859.jpg +Places365_test_00048866.jpg +Places365_test_00048893.jpg +Places365_test_00048902.jpg +Places365_test_00048915.jpg +Places365_test_00048941.jpg +Places365_test_00048948.jpg +Places365_test_00048960.jpg +Places365_test_00048968.jpg +Places365_test_00048969.jpg +Places365_test_00048983.jpg +Places365_test_00048991.jpg +Places365_test_00048995.jpg +Places365_test_00049005.jpg +Places365_test_00049008.jpg +Places365_test_00049013.jpg +Places365_test_00049061.jpg +Places365_test_00049062.jpg +Places365_test_00049072.jpg +Places365_test_00049073.jpg +Places365_test_00049097.jpg +Places365_test_00049103.jpg +Places365_test_00049116.jpg +Places365_test_00049121.jpg +Places365_test_00049124.jpg +Places365_test_00049144.jpg +Places365_test_00049171.jpg +Places365_test_00049192.jpg +Places365_test_00049193.jpg +Places365_test_00049199.jpg +Places365_test_00049232.jpg +Places365_test_00049238.jpg +Places365_test_00049270.jpg +Places365_test_00049288.jpg +Places365_test_00049297.jpg +Places365_test_00049322.jpg +Places365_test_00049343.jpg +Places365_test_00049345.jpg +Places365_test_00049375.jpg +Places365_test_00049390.jpg +Places365_test_00049391.jpg +Places365_test_00049396.jpg +Places365_test_00049397.jpg +Places365_test_00049411.jpg +Places365_test_00049431.jpg +Places365_test_00049442.jpg +Places365_test_00049447.jpg +Places365_test_00049458.jpg +Places365_test_00049461.jpg +Places365_test_00049476.jpg +Places365_test_00049503.jpg +Places365_test_00049522.jpg +Places365_test_00049544.jpg +Places365_test_00049549.jpg +Places365_test_00049556.jpg +Places365_test_00049573.jpg +Places365_test_00049576.jpg +Places365_test_00049585.jpg +Places365_test_00049599.jpg +Places365_test_00049646.jpg +Places365_test_00049662.jpg +Places365_test_00049677.jpg +Places365_test_00049689.jpg +Places365_test_00049708.jpg +Places365_test_00049710.jpg +Places365_test_00049735.jpg +Places365_test_00049749.jpg +Places365_test_00049752.jpg +Places365_test_00049789.jpg +Places365_test_00049805.jpg +Places365_test_00049822.jpg +Places365_test_00049823.jpg +Places365_test_00049828.jpg +Places365_test_00049838.jpg +Places365_test_00049839.jpg 
+Places365_test_00049841.jpg +Places365_test_00049843.jpg +Places365_test_00049860.jpg +Places365_test_00049886.jpg +Places365_test_00049911.jpg +Places365_test_00049926.jpg +Places365_test_00049930.jpg +Places365_test_00049960.jpg +Places365_test_00050024.jpg +Places365_test_00050030.jpg +Places365_test_00050031.jpg +Places365_test_00050044.jpg +Places365_test_00050055.jpg +Places365_test_00050064.jpg +Places365_test_00050077.jpg +Places365_test_00050081.jpg +Places365_test_00050085.jpg +Places365_test_00050093.jpg +Places365_test_00050103.jpg +Places365_test_00050119.jpg +Places365_test_00050127.jpg +Places365_test_00050131.jpg +Places365_test_00050134.jpg +Places365_test_00050151.jpg +Places365_test_00050179.jpg +Places365_test_00050191.jpg +Places365_test_00050193.jpg +Places365_test_00050228.jpg +Places365_test_00050268.jpg +Places365_test_00050270.jpg +Places365_test_00050280.jpg +Places365_test_00050285.jpg +Places365_test_00050292.jpg +Places365_test_00050297.jpg +Places365_test_00050315.jpg +Places365_test_00050325.jpg +Places365_test_00050330.jpg +Places365_test_00050353.jpg +Places365_test_00050359.jpg +Places365_test_00050389.jpg +Places365_test_00050392.jpg +Places365_test_00050401.jpg +Places365_test_00050411.jpg +Places365_test_00050429.jpg +Places365_test_00050432.jpg +Places365_test_00050434.jpg +Places365_test_00050436.jpg +Places365_test_00050475.jpg +Places365_test_00050489.jpg +Places365_test_00050498.jpg +Places365_test_00050499.jpg +Places365_test_00050508.jpg +Places365_test_00050510.jpg +Places365_test_00050522.jpg +Places365_test_00050536.jpg +Places365_test_00050549.jpg +Places365_test_00050554.jpg +Places365_test_00050569.jpg +Places365_test_00050585.jpg +Places365_test_00050602.jpg +Places365_test_00050605.jpg +Places365_test_00050606.jpg +Places365_test_00050624.jpg +Places365_test_00050633.jpg +Places365_test_00050638.jpg +Places365_test_00050639.jpg +Places365_test_00050643.jpg +Places365_test_00050674.jpg +Places365_test_00050696.jpg +Places365_test_00050717.jpg +Places365_test_00050727.jpg +Places365_test_00050751.jpg +Places365_test_00050765.jpg +Places365_test_00050769.jpg +Places365_test_00050778.jpg +Places365_test_00050788.jpg +Places365_test_00050813.jpg +Places365_test_00050816.jpg +Places365_test_00050839.jpg +Places365_test_00050860.jpg +Places365_test_00050868.jpg +Places365_test_00050875.jpg +Places365_test_00050887.jpg +Places365_test_00050891.jpg +Places365_test_00050898.jpg +Places365_test_00050904.jpg +Places365_test_00050908.jpg +Places365_test_00050915.jpg +Places365_test_00050925.jpg +Places365_test_00050934.jpg +Places365_test_00050935.jpg +Places365_test_00050953.jpg +Places365_test_00050987.jpg +Places365_test_00050995.jpg +Places365_test_00050997.jpg +Places365_test_00051001.jpg +Places365_test_00051020.jpg +Places365_test_00051032.jpg +Places365_test_00051044.jpg +Places365_test_00051048.jpg +Places365_test_00051070.jpg +Places365_test_00051074.jpg +Places365_test_00051115.jpg +Places365_test_00051119.jpg +Places365_test_00051132.jpg +Places365_test_00051133.jpg +Places365_test_00051139.jpg +Places365_test_00051158.jpg +Places365_test_00051178.jpg +Places365_test_00051181.jpg +Places365_test_00051194.jpg +Places365_test_00051196.jpg +Places365_test_00051214.jpg +Places365_test_00051225.jpg +Places365_test_00051234.jpg +Places365_test_00051251.jpg +Places365_test_00051261.jpg +Places365_test_00051262.jpg +Places365_test_00051275.jpg +Places365_test_00051277.jpg +Places365_test_00051291.jpg +Places365_test_00051297.jpg 
+Places365_test_00051317.jpg +Places365_test_00051325.jpg +Places365_test_00051326.jpg +Places365_test_00051334.jpg +Places365_test_00051341.jpg +Places365_test_00051354.jpg +Places365_test_00051359.jpg +Places365_test_00051370.jpg +Places365_test_00051371.jpg +Places365_test_00051389.jpg +Places365_test_00051390.jpg +Places365_test_00051401.jpg +Places365_test_00051406.jpg +Places365_test_00051427.jpg +Places365_test_00051428.jpg +Places365_test_00051450.jpg +Places365_test_00051455.jpg +Places365_test_00051456.jpg +Places365_test_00051474.jpg +Places365_test_00051502.jpg +Places365_test_00051521.jpg +Places365_test_00051526.jpg +Places365_test_00051536.jpg +Places365_test_00051556.jpg +Places365_test_00051561.jpg +Places365_test_00051573.jpg +Places365_test_00051582.jpg +Places365_test_00051594.jpg +Places365_test_00051635.jpg +Places365_test_00051638.jpg +Places365_test_00051660.jpg +Places365_test_00051695.jpg +Places365_test_00051705.jpg +Places365_test_00051725.jpg +Places365_test_00051728.jpg +Places365_test_00051742.jpg +Places365_test_00051759.jpg +Places365_test_00051784.jpg +Places365_test_00051794.jpg +Places365_test_00051804.jpg +Places365_test_00051806.jpg +Places365_test_00051838.jpg +Places365_test_00051843.jpg +Places365_test_00051872.jpg +Places365_test_00051881.jpg +Places365_test_00051905.jpg +Places365_test_00051935.jpg +Places365_test_00051958.jpg +Places365_test_00051966.jpg +Places365_test_00051975.jpg +Places365_test_00051987.jpg +Places365_test_00051994.jpg +Places365_test_00051999.jpg +Places365_test_00052004.jpg +Places365_test_00052020.jpg +Places365_test_00052029.jpg +Places365_test_00052032.jpg +Places365_test_00052033.jpg +Places365_test_00052036.jpg +Places365_test_00052044.jpg +Places365_test_00052048.jpg +Places365_test_00052050.jpg +Places365_test_00052051.jpg +Places365_test_00052057.jpg +Places365_test_00052064.jpg +Places365_test_00052107.jpg +Places365_test_00052115.jpg +Places365_test_00052117.jpg +Places365_test_00052142.jpg +Places365_test_00052143.jpg +Places365_test_00052154.jpg +Places365_test_00052158.jpg +Places365_test_00052178.jpg +Places365_test_00052179.jpg +Places365_test_00052197.jpg +Places365_test_00052203.jpg +Places365_test_00052206.jpg +Places365_test_00052214.jpg +Places365_test_00052218.jpg +Places365_test_00052223.jpg +Places365_test_00052240.jpg +Places365_test_00052244.jpg +Places365_test_00052297.jpg +Places365_test_00052305.jpg +Places365_test_00052306.jpg +Places365_test_00052314.jpg +Places365_test_00052324.jpg +Places365_test_00052332.jpg +Places365_test_00052336.jpg +Places365_test_00052340.jpg +Places365_test_00052344.jpg +Places365_test_00052347.jpg +Places365_test_00052360.jpg +Places365_test_00052388.jpg +Places365_test_00052392.jpg +Places365_test_00052393.jpg +Places365_test_00052415.jpg +Places365_test_00052431.jpg +Places365_test_00052437.jpg +Places365_test_00052438.jpg +Places365_test_00052448.jpg +Places365_test_00052461.jpg +Places365_test_00052478.jpg +Places365_test_00052480.jpg +Places365_test_00052491.jpg +Places365_test_00052498.jpg +Places365_test_00052541.jpg +Places365_test_00052546.jpg +Places365_test_00052561.jpg +Places365_test_00052562.jpg +Places365_test_00052574.jpg +Places365_test_00052583.jpg +Places365_test_00052595.jpg +Places365_test_00052599.jpg +Places365_test_00052602.jpg +Places365_test_00052624.jpg +Places365_test_00052625.jpg +Places365_test_00052627.jpg +Places365_test_00052631.jpg +Places365_test_00052643.jpg +Places365_test_00052653.jpg +Places365_test_00052664.jpg 
+Places365_test_00052669.jpg +Places365_test_00052672.jpg +Places365_test_00052674.jpg +Places365_test_00052677.jpg +Places365_test_00052710.jpg +Places365_test_00052714.jpg +Places365_test_00052721.jpg +Places365_test_00052731.jpg +Places365_test_00052740.jpg +Places365_test_00052752.jpg +Places365_test_00052753.jpg +Places365_test_00052770.jpg +Places365_test_00052787.jpg +Places365_test_00052833.jpg +Places365_test_00052838.jpg +Places365_test_00052845.jpg +Places365_test_00052854.jpg +Places365_test_00052878.jpg +Places365_test_00052880.jpg +Places365_test_00052892.jpg +Places365_test_00052912.jpg +Places365_test_00052923.jpg +Places365_test_00052941.jpg +Places365_test_00052945.jpg +Places365_test_00052960.jpg +Places365_test_00052961.jpg +Places365_test_00053005.jpg +Places365_test_00053015.jpg +Places365_test_00053049.jpg +Places365_test_00053054.jpg +Places365_test_00053061.jpg +Places365_test_00053070.jpg +Places365_test_00053080.jpg +Places365_test_00053093.jpg +Places365_test_00053124.jpg +Places365_test_00053131.jpg +Places365_test_00053140.jpg +Places365_test_00053141.jpg +Places365_test_00053146.jpg +Places365_test_00053158.jpg +Places365_test_00053162.jpg +Places365_test_00053168.jpg +Places365_test_00053185.jpg +Places365_test_00053197.jpg +Places365_test_00053220.jpg +Places365_test_00053221.jpg +Places365_test_00053227.jpg +Places365_test_00053229.jpg +Places365_test_00053234.jpg +Places365_test_00053245.jpg +Places365_test_00053251.jpg +Places365_test_00053298.jpg +Places365_test_00053311.jpg +Places365_test_00053313.jpg +Places365_test_00053333.jpg +Places365_test_00053336.jpg +Places365_test_00053337.jpg +Places365_test_00053355.jpg +Places365_test_00053359.jpg +Places365_test_00053365.jpg +Places365_test_00053367.jpg +Places365_test_00053380.jpg +Places365_test_00053393.jpg +Places365_test_00053436.jpg +Places365_test_00053470.jpg +Places365_test_00053476.jpg +Places365_test_00053491.jpg +Places365_test_00053514.jpg +Places365_test_00053530.jpg +Places365_test_00053544.jpg +Places365_test_00053557.jpg +Places365_test_00053569.jpg +Places365_test_00053576.jpg +Places365_test_00053596.jpg +Places365_test_00053604.jpg +Places365_test_00053606.jpg +Places365_test_00053627.jpg +Places365_test_00053633.jpg +Places365_test_00053635.jpg +Places365_test_00053644.jpg +Places365_test_00053658.jpg +Places365_test_00053661.jpg +Places365_test_00053709.jpg +Places365_test_00053713.jpg +Places365_test_00053725.jpg +Places365_test_00053728.jpg +Places365_test_00053756.jpg +Places365_test_00053774.jpg +Places365_test_00053788.jpg +Places365_test_00053805.jpg +Places365_test_00053822.jpg +Places365_test_00053852.jpg +Places365_test_00053866.jpg +Places365_test_00053875.jpg +Places365_test_00053879.jpg +Places365_test_00053882.jpg +Places365_test_00053887.jpg +Places365_test_00053894.jpg +Places365_test_00053911.jpg +Places365_test_00053933.jpg +Places365_test_00053947.jpg +Places365_test_00053949.jpg +Places365_test_00053973.jpg +Places365_test_00053978.jpg +Places365_test_00053985.jpg +Places365_test_00053993.jpg +Places365_test_00054005.jpg +Places365_test_00054014.jpg +Places365_test_00054017.jpg +Places365_test_00054036.jpg +Places365_test_00054039.jpg +Places365_test_00054053.jpg +Places365_test_00054054.jpg +Places365_test_00054070.jpg +Places365_test_00054078.jpg +Places365_test_00054086.jpg +Places365_test_00054094.jpg +Places365_test_00054107.jpg +Places365_test_00054112.jpg +Places365_test_00054130.jpg +Places365_test_00054198.jpg +Places365_test_00054217.jpg 
+Places365_test_00054224.jpg +Places365_test_00054241.jpg +Places365_test_00054259.jpg +Places365_test_00054268.jpg +Places365_test_00054280.jpg +Places365_test_00054290.jpg +Places365_test_00054303.jpg +Places365_test_00054314.jpg +Places365_test_00054316.jpg +Places365_test_00054324.jpg +Places365_test_00054331.jpg +Places365_test_00054334.jpg +Places365_test_00054337.jpg +Places365_test_00054340.jpg +Places365_test_00054347.jpg +Places365_test_00054356.jpg +Places365_test_00054357.jpg +Places365_test_00054360.jpg +Places365_test_00054378.jpg +Places365_test_00054383.jpg +Places365_test_00054394.jpg +Places365_test_00054402.jpg +Places365_test_00054409.jpg +Places365_test_00054414.jpg +Places365_test_00054440.jpg +Places365_test_00054445.jpg +Places365_test_00054462.jpg +Places365_test_00054463.jpg +Places365_test_00054474.jpg +Places365_test_00054476.jpg +Places365_test_00054481.jpg +Places365_test_00054485.jpg +Places365_test_00054493.jpg +Places365_test_00054501.jpg +Places365_test_00054521.jpg +Places365_test_00054542.jpg +Places365_test_00054593.jpg +Places365_test_00054602.jpg +Places365_test_00054616.jpg +Places365_test_00054634.jpg +Places365_test_00054637.jpg +Places365_test_00054660.jpg +Places365_test_00054666.jpg +Places365_test_00054684.jpg +Places365_test_00054686.jpg +Places365_test_00054693.jpg +Places365_test_00054695.jpg +Places365_test_00054713.jpg +Places365_test_00054737.jpg +Places365_test_00054739.jpg +Places365_test_00054752.jpg +Places365_test_00054755.jpg +Places365_test_00054775.jpg +Places365_test_00054779.jpg +Places365_test_00054783.jpg +Places365_test_00054793.jpg +Places365_test_00054813.jpg +Places365_test_00054816.jpg +Places365_test_00054817.jpg +Places365_test_00054818.jpg +Places365_test_00054824.jpg +Places365_test_00054843.jpg +Places365_test_00054855.jpg +Places365_test_00054857.jpg +Places365_test_00054862.jpg +Places365_test_00054867.jpg +Places365_test_00054875.jpg +Places365_test_00054879.jpg +Places365_test_00054882.jpg +Places365_test_00054894.jpg +Places365_test_00054904.jpg +Places365_test_00054916.jpg +Places365_test_00054922.jpg +Places365_test_00054930.jpg +Places365_test_00054939.jpg +Places365_test_00054944.jpg +Places365_test_00054956.jpg +Places365_test_00054960.jpg +Places365_test_00054965.jpg +Places365_test_00054969.jpg +Places365_test_00054999.jpg +Places365_test_00055000.jpg +Places365_test_00055012.jpg +Places365_test_00055017.jpg +Places365_test_00055022.jpg +Places365_test_00055043.jpg +Places365_test_00055067.jpg +Places365_test_00055081.jpg +Places365_test_00055083.jpg +Places365_test_00055093.jpg +Places365_test_00055127.jpg +Places365_test_00055141.jpg +Places365_test_00055153.jpg +Places365_test_00055161.jpg +Places365_test_00055162.jpg +Places365_test_00055174.jpg +Places365_test_00055196.jpg +Places365_test_00055202.jpg +Places365_test_00055215.jpg +Places365_test_00055226.jpg +Places365_test_00055288.jpg +Places365_test_00055298.jpg +Places365_test_00055324.jpg +Places365_test_00055333.jpg +Places365_test_00055338.jpg +Places365_test_00055340.jpg +Places365_test_00055354.jpg +Places365_test_00055356.jpg +Places365_test_00055390.jpg +Places365_test_00055397.jpg +Places365_test_00055404.jpg +Places365_test_00055409.jpg +Places365_test_00055424.jpg +Places365_test_00055429.jpg +Places365_test_00055437.jpg +Places365_test_00055440.jpg +Places365_test_00055467.jpg +Places365_test_00055473.jpg +Places365_test_00055503.jpg +Places365_test_00055504.jpg +Places365_test_00055518.jpg +Places365_test_00055563.jpg 
+Places365_test_00055574.jpg +Places365_test_00055583.jpg +Places365_test_00055585.jpg +Places365_test_00055599.jpg +Places365_test_00055611.jpg +Places365_test_00055675.jpg +Places365_test_00055679.jpg +Places365_test_00055688.jpg +Places365_test_00055691.jpg +Places365_test_00055697.jpg +Places365_test_00055705.jpg +Places365_test_00055719.jpg +Places365_test_00055722.jpg +Places365_test_00055724.jpg +Places365_test_00055728.jpg +Places365_test_00055738.jpg +Places365_test_00055739.jpg +Places365_test_00055748.jpg +Places365_test_00055764.jpg +Places365_test_00055765.jpg +Places365_test_00055782.jpg +Places365_test_00055799.jpg +Places365_test_00055803.jpg +Places365_test_00055811.jpg +Places365_test_00055816.jpg +Places365_test_00055819.jpg +Places365_test_00055826.jpg +Places365_test_00055838.jpg +Places365_test_00055843.jpg +Places365_test_00055856.jpg +Places365_test_00055863.jpg +Places365_test_00055884.jpg +Places365_test_00055896.jpg +Places365_test_00055911.jpg +Places365_test_00055915.jpg +Places365_test_00055935.jpg +Places365_test_00055960.jpg +Places365_test_00055965.jpg +Places365_test_00055984.jpg +Places365_test_00055993.jpg +Places365_test_00055998.jpg +Places365_test_00056001.jpg +Places365_test_00056004.jpg +Places365_test_00056010.jpg +Places365_test_00056033.jpg +Places365_test_00056050.jpg +Places365_test_00056061.jpg +Places365_test_00056062.jpg +Places365_test_00056067.jpg +Places365_test_00056071.jpg +Places365_test_00056075.jpg +Places365_test_00056076.jpg +Places365_test_00056080.jpg +Places365_test_00056082.jpg +Places365_test_00056084.jpg +Places365_test_00056093.jpg +Places365_test_00056097.jpg +Places365_test_00056116.jpg +Places365_test_00056198.jpg +Places365_test_00056207.jpg +Places365_test_00056223.jpg +Places365_test_00056236.jpg +Places365_test_00056237.jpg +Places365_test_00056243.jpg +Places365_test_00056247.jpg +Places365_test_00056263.jpg +Places365_test_00056271.jpg +Places365_test_00056289.jpg +Places365_test_00056300.jpg +Places365_test_00056301.jpg +Places365_test_00056302.jpg +Places365_test_00056319.jpg +Places365_test_00056322.jpg +Places365_test_00056328.jpg +Places365_test_00056340.jpg +Places365_test_00056360.jpg +Places365_test_00056372.jpg +Places365_test_00056375.jpg +Places365_test_00056398.jpg +Places365_test_00056403.jpg +Places365_test_00056422.jpg +Places365_test_00056424.jpg +Places365_test_00056425.jpg +Places365_test_00056427.jpg +Places365_test_00056430.jpg +Places365_test_00056437.jpg +Places365_test_00056438.jpg +Places365_test_00056439.jpg +Places365_test_00056459.jpg +Places365_test_00056480.jpg +Places365_test_00056493.jpg +Places365_test_00056495.jpg +Places365_test_00056498.jpg +Places365_test_00056528.jpg +Places365_test_00056529.jpg +Places365_test_00056532.jpg +Places365_test_00056536.jpg +Places365_test_00056541.jpg +Places365_test_00056553.jpg +Places365_test_00056564.jpg +Places365_test_00056565.jpg +Places365_test_00056573.jpg +Places365_test_00056593.jpg +Places365_test_00056597.jpg +Places365_test_00056598.jpg +Places365_test_00056609.jpg +Places365_test_00056615.jpg +Places365_test_00056623.jpg +Places365_test_00056629.jpg +Places365_test_00056631.jpg +Places365_test_00056662.jpg +Places365_test_00056671.jpg +Places365_test_00056674.jpg +Places365_test_00056677.jpg +Places365_test_00056680.jpg +Places365_test_00056731.jpg +Places365_test_00056744.jpg +Places365_test_00056778.jpg +Places365_test_00056788.jpg +Places365_test_00056790.jpg +Places365_test_00056796.jpg +Places365_test_00056816.jpg 
+Places365_test_00056818.jpg +Places365_test_00056836.jpg +Places365_test_00056837.jpg +Places365_test_00056853.jpg +Places365_test_00056854.jpg +Places365_test_00056861.jpg +Places365_test_00056868.jpg +Places365_test_00056884.jpg +Places365_test_00056904.jpg +Places365_test_00056912.jpg +Places365_test_00056918.jpg +Places365_test_00056932.jpg +Places365_test_00056934.jpg +Places365_test_00056955.jpg +Places365_test_00056982.jpg +Places365_test_00056986.jpg +Places365_test_00056990.jpg +Places365_test_00057000.jpg +Places365_test_00057001.jpg +Places365_test_00057008.jpg +Places365_test_00057022.jpg +Places365_test_00057027.jpg +Places365_test_00057039.jpg +Places365_test_00057043.jpg +Places365_test_00057080.jpg +Places365_test_00057101.jpg +Places365_test_00057106.jpg +Places365_test_00057113.jpg +Places365_test_00057125.jpg +Places365_test_00057130.jpg +Places365_test_00057143.jpg +Places365_test_00057144.jpg +Places365_test_00057157.jpg +Places365_test_00057160.jpg +Places365_test_00057163.jpg +Places365_test_00057173.jpg +Places365_test_00057191.jpg +Places365_test_00057238.jpg +Places365_test_00057255.jpg +Places365_test_00057262.jpg +Places365_test_00057271.jpg +Places365_test_00057292.jpg +Places365_test_00057324.jpg +Places365_test_00057332.jpg +Places365_test_00057357.jpg +Places365_test_00057361.jpg +Places365_test_00057363.jpg +Places365_test_00057386.jpg +Places365_test_00057392.jpg +Places365_test_00057428.jpg +Places365_test_00057457.jpg +Places365_test_00057471.jpg +Places365_test_00057481.jpg +Places365_test_00057498.jpg +Places365_test_00057500.jpg +Places365_test_00057522.jpg +Places365_test_00057536.jpg +Places365_test_00057541.jpg +Places365_test_00057577.jpg +Places365_test_00057579.jpg +Places365_test_00057591.jpg +Places365_test_00057596.jpg +Places365_test_00057599.jpg +Places365_test_00057601.jpg +Places365_test_00057615.jpg +Places365_test_00057636.jpg +Places365_test_00057638.jpg +Places365_test_00057664.jpg +Places365_test_00057677.jpg +Places365_test_00057682.jpg +Places365_test_00057684.jpg +Places365_test_00057690.jpg +Places365_test_00057693.jpg +Places365_test_00057739.jpg +Places365_test_00057743.jpg +Places365_test_00057744.jpg +Places365_test_00057750.jpg +Places365_test_00057751.jpg +Places365_test_00057754.jpg +Places365_test_00057755.jpg +Places365_test_00057766.jpg +Places365_test_00057780.jpg +Places365_test_00057793.jpg +Places365_test_00057796.jpg +Places365_test_00057808.jpg +Places365_test_00057813.jpg +Places365_test_00057818.jpg +Places365_test_00057831.jpg +Places365_test_00057842.jpg +Places365_test_00057847.jpg +Places365_test_00057850.jpg +Places365_test_00057858.jpg +Places365_test_00057859.jpg +Places365_test_00057861.jpg +Places365_test_00057864.jpg +Places365_test_00057882.jpg +Places365_test_00057897.jpg +Places365_test_00057903.jpg +Places365_test_00057922.jpg +Places365_test_00057938.jpg +Places365_test_00057943.jpg +Places365_test_00057958.jpg +Places365_test_00057965.jpg +Places365_test_00057977.jpg +Places365_test_00057979.jpg +Places365_test_00057995.jpg +Places365_test_00058002.jpg +Places365_test_00058062.jpg +Places365_test_00058077.jpg +Places365_test_00058108.jpg +Places365_test_00058110.jpg +Places365_test_00058111.jpg +Places365_test_00058112.jpg +Places365_test_00058118.jpg +Places365_test_00058125.jpg +Places365_test_00058139.jpg +Places365_test_00058146.jpg +Places365_test_00058151.jpg +Places365_test_00058154.jpg +Places365_test_00058183.jpg +Places365_test_00058238.jpg +Places365_test_00058250.jpg 
+Places365_test_00058271.jpg +Places365_test_00058276.jpg +Places365_test_00058282.jpg +Places365_test_00058286.jpg +Places365_test_00058288.jpg +Places365_test_00058290.jpg +Places365_test_00058293.jpg +Places365_test_00058298.jpg +Places365_test_00058313.jpg +Places365_test_00058330.jpg +Places365_test_00058332.jpg +Places365_test_00058355.jpg +Places365_test_00058356.jpg +Places365_test_00058370.jpg +Places365_test_00058407.jpg +Places365_test_00058413.jpg +Places365_test_00058418.jpg +Places365_test_00058430.jpg +Places365_test_00058447.jpg +Places365_test_00058452.jpg +Places365_test_00058480.jpg +Places365_test_00058484.jpg +Places365_test_00058488.jpg +Places365_test_00058507.jpg +Places365_test_00058553.jpg +Places365_test_00058567.jpg +Places365_test_00058588.jpg +Places365_test_00058589.jpg +Places365_test_00058610.jpg +Places365_test_00058620.jpg +Places365_test_00058626.jpg +Places365_test_00058642.jpg +Places365_test_00058650.jpg +Places365_test_00058667.jpg +Places365_test_00058670.jpg +Places365_test_00058680.jpg +Places365_test_00058686.jpg +Places365_test_00058699.jpg +Places365_test_00058714.jpg +Places365_test_00058728.jpg +Places365_test_00058760.jpg +Places365_test_00058787.jpg +Places365_test_00058817.jpg +Places365_test_00058824.jpg +Places365_test_00058837.jpg +Places365_test_00058841.jpg +Places365_test_00058843.jpg +Places365_test_00058868.jpg +Places365_test_00058886.jpg +Places365_test_00058902.jpg +Places365_test_00058904.jpg +Places365_test_00058919.jpg +Places365_test_00058932.jpg +Places365_test_00058948.jpg +Places365_test_00058956.jpg +Places365_test_00058981.jpg +Places365_test_00059010.jpg +Places365_test_00059036.jpg +Places365_test_00059043.jpg +Places365_test_00059053.jpg +Places365_test_00059059.jpg +Places365_test_00059066.jpg +Places365_test_00059092.jpg +Places365_test_00059120.jpg +Places365_test_00059140.jpg +Places365_test_00059143.jpg +Places365_test_00059151.jpg +Places365_test_00059165.jpg +Places365_test_00059175.jpg +Places365_test_00059185.jpg +Places365_test_00059196.jpg +Places365_test_00059208.jpg +Places365_test_00059211.jpg +Places365_test_00059218.jpg +Places365_test_00059237.jpg +Places365_test_00059247.jpg +Places365_test_00059255.jpg +Places365_test_00059261.jpg +Places365_test_00059268.jpg +Places365_test_00059295.jpg +Places365_test_00059304.jpg +Places365_test_00059309.jpg +Places365_test_00059314.jpg +Places365_test_00059338.jpg +Places365_test_00059354.jpg +Places365_test_00059361.jpg +Places365_test_00059365.jpg +Places365_test_00059369.jpg +Places365_test_00059386.jpg +Places365_test_00059403.jpg +Places365_test_00059410.jpg +Places365_test_00059422.jpg +Places365_test_00059423.jpg +Places365_test_00059424.jpg +Places365_test_00059426.jpg +Places365_test_00059430.jpg +Places365_test_00059461.jpg +Places365_test_00059463.jpg +Places365_test_00059464.jpg +Places365_test_00059465.jpg +Places365_test_00059471.jpg +Places365_test_00059474.jpg +Places365_test_00059492.jpg +Places365_test_00059494.jpg +Places365_test_00059501.jpg +Places365_test_00059517.jpg +Places365_test_00059522.jpg +Places365_test_00059549.jpg +Places365_test_00059567.jpg +Places365_test_00059597.jpg +Places365_test_00059639.jpg +Places365_test_00059644.jpg +Places365_test_00059646.jpg +Places365_test_00059669.jpg +Places365_test_00059671.jpg +Places365_test_00059680.jpg +Places365_test_00059690.jpg +Places365_test_00059704.jpg +Places365_test_00059720.jpg +Places365_test_00059742.jpg +Places365_test_00059745.jpg +Places365_test_00059746.jpg 
+Places365_test_00059760.jpg +Places365_test_00059762.jpg +Places365_test_00059764.jpg +Places365_test_00059766.jpg +Places365_test_00059788.jpg +Places365_test_00059810.jpg +Places365_test_00059838.jpg +Places365_test_00059847.jpg +Places365_test_00059880.jpg +Places365_test_00059904.jpg +Places365_test_00059907.jpg +Places365_test_00059933.jpg +Places365_test_00059936.jpg +Places365_test_00059947.jpg +Places365_test_00059988.jpg +Places365_test_00059991.jpg +Places365_test_00060005.jpg +Places365_test_00060021.jpg +Places365_test_00060022.jpg +Places365_test_00060023.jpg +Places365_test_00060024.jpg +Places365_test_00060033.jpg +Places365_test_00060039.jpg +Places365_test_00060099.jpg +Places365_test_00060121.jpg +Places365_test_00060132.jpg +Places365_test_00060133.jpg +Places365_test_00060139.jpg +Places365_test_00060155.jpg +Places365_test_00060179.jpg +Places365_test_00060189.jpg +Places365_test_00060193.jpg +Places365_test_00060195.jpg +Places365_test_00060200.jpg +Places365_test_00060204.jpg +Places365_test_00060209.jpg +Places365_test_00060222.jpg +Places365_test_00060247.jpg +Places365_test_00060251.jpg +Places365_test_00060273.jpg +Places365_test_00060308.jpg +Places365_test_00060317.jpg +Places365_test_00060320.jpg +Places365_test_00060333.jpg +Places365_test_00060334.jpg +Places365_test_00060342.jpg +Places365_test_00060355.jpg +Places365_test_00060368.jpg +Places365_test_00060370.jpg +Places365_test_00060373.jpg +Places365_test_00060374.jpg +Places365_test_00060380.jpg +Places365_test_00060401.jpg +Places365_test_00060411.jpg +Places365_test_00060414.jpg +Places365_test_00060419.jpg +Places365_test_00060433.jpg +Places365_test_00060443.jpg +Places365_test_00060458.jpg +Places365_test_00060474.jpg +Places365_test_00060485.jpg +Places365_test_00060510.jpg +Places365_test_00060511.jpg +Places365_test_00060514.jpg +Places365_test_00060517.jpg +Places365_test_00060519.jpg +Places365_test_00060546.jpg +Places365_test_00060568.jpg +Places365_test_00060626.jpg +Places365_test_00060650.jpg +Places365_test_00060656.jpg +Places365_test_00060661.jpg +Places365_test_00060675.jpg +Places365_test_00060680.jpg +Places365_test_00060681.jpg +Places365_test_00060688.jpg +Places365_test_00060709.jpg +Places365_test_00060713.jpg +Places365_test_00060714.jpg +Places365_test_00060723.jpg +Places365_test_00060725.jpg +Places365_test_00060726.jpg +Places365_test_00060745.jpg +Places365_test_00060746.jpg +Places365_test_00060751.jpg +Places365_test_00060754.jpg +Places365_test_00060776.jpg +Places365_test_00060786.jpg +Places365_test_00060798.jpg +Places365_test_00060801.jpg +Places365_test_00060820.jpg +Places365_test_00060835.jpg +Places365_test_00060841.jpg +Places365_test_00060846.jpg +Places365_test_00060853.jpg +Places365_test_00060858.jpg +Places365_test_00060859.jpg +Places365_test_00060880.jpg +Places365_test_00060890.jpg +Places365_test_00060896.jpg +Places365_test_00060906.jpg +Places365_test_00060907.jpg +Places365_test_00060910.jpg +Places365_test_00060914.jpg +Places365_test_00060916.jpg +Places365_test_00060943.jpg +Places365_test_00060971.jpg +Places365_test_00060985.jpg +Places365_test_00060987.jpg +Places365_test_00060991.jpg +Places365_test_00060995.jpg +Places365_test_00060996.jpg +Places365_test_00060997.jpg +Places365_test_00061013.jpg +Places365_test_00061015.jpg +Places365_test_00061024.jpg +Places365_test_00061041.jpg +Places365_test_00061044.jpg +Places365_test_00061046.jpg +Places365_test_00061056.jpg +Places365_test_00061069.jpg +Places365_test_00061079.jpg 
+Places365_test_00061090.jpg +Places365_test_00061110.jpg +Places365_test_00061111.jpg +Places365_test_00061121.jpg +Places365_test_00061122.jpg +Places365_test_00061153.jpg +Places365_test_00061180.jpg +Places365_test_00061184.jpg +Places365_test_00061192.jpg +Places365_test_00061204.jpg +Places365_test_00061207.jpg +Places365_test_00061230.jpg +Places365_test_00061240.jpg +Places365_test_00061245.jpg +Places365_test_00061270.jpg +Places365_test_00061275.jpg +Places365_test_00061291.jpg +Places365_test_00061299.jpg +Places365_test_00061301.jpg +Places365_test_00061318.jpg +Places365_test_00061332.jpg +Places365_test_00061369.jpg +Places365_test_00061370.jpg +Places365_test_00061406.jpg +Places365_test_00061414.jpg +Places365_test_00061415.jpg +Places365_test_00061420.jpg +Places365_test_00061448.jpg +Places365_test_00061455.jpg +Places365_test_00061478.jpg +Places365_test_00061499.jpg +Places365_test_00061510.jpg +Places365_test_00061528.jpg +Places365_test_00061529.jpg +Places365_test_00061533.jpg +Places365_test_00061547.jpg +Places365_test_00061553.jpg +Places365_test_00061563.jpg +Places365_test_00061570.jpg +Places365_test_00061606.jpg +Places365_test_00061609.jpg +Places365_test_00061619.jpg +Places365_test_00061646.jpg +Places365_test_00061658.jpg +Places365_test_00061662.jpg +Places365_test_00061665.jpg +Places365_test_00061689.jpg +Places365_test_00061698.jpg +Places365_test_00061711.jpg +Places365_test_00061714.jpg +Places365_test_00061744.jpg +Places365_test_00061747.jpg +Places365_test_00061748.jpg +Places365_test_00061769.jpg +Places365_test_00061780.jpg +Places365_test_00061786.jpg +Places365_test_00061797.jpg +Places365_test_00061812.jpg +Places365_test_00061820.jpg +Places365_test_00061837.jpg +Places365_test_00061843.jpg +Places365_test_00061858.jpg +Places365_test_00061865.jpg +Places365_test_00061866.jpg +Places365_test_00061867.jpg +Places365_test_00061869.jpg +Places365_test_00061880.jpg +Places365_test_00061900.jpg +Places365_test_00061925.jpg +Places365_test_00061935.jpg +Places365_test_00061940.jpg +Places365_test_00061949.jpg +Places365_test_00061964.jpg +Places365_test_00061966.jpg +Places365_test_00061976.jpg +Places365_test_00061985.jpg +Places365_test_00061990.jpg +Places365_test_00062020.jpg +Places365_test_00062081.jpg +Places365_test_00062109.jpg +Places365_test_00062112.jpg +Places365_test_00062125.jpg +Places365_test_00062138.jpg +Places365_test_00062151.jpg +Places365_test_00062157.jpg +Places365_test_00062160.jpg +Places365_test_00062172.jpg +Places365_test_00062183.jpg +Places365_test_00062209.jpg +Places365_test_00062215.jpg +Places365_test_00062216.jpg +Places365_test_00062218.jpg +Places365_test_00062225.jpg +Places365_test_00062231.jpg +Places365_test_00062236.jpg +Places365_test_00062242.jpg +Places365_test_00062251.jpg +Places365_test_00062252.jpg +Places365_test_00062254.jpg +Places365_test_00062276.jpg +Places365_test_00062277.jpg +Places365_test_00062313.jpg +Places365_test_00062327.jpg +Places365_test_00062335.jpg +Places365_test_00062352.jpg +Places365_test_00062358.jpg +Places365_test_00062374.jpg +Places365_test_00062376.jpg +Places365_test_00062387.jpg +Places365_test_00062393.jpg +Places365_test_00062398.jpg +Places365_test_00062400.jpg +Places365_test_00062404.jpg +Places365_test_00062426.jpg +Places365_test_00062433.jpg +Places365_test_00062443.jpg +Places365_test_00062465.jpg +Places365_test_00062467.jpg +Places365_test_00062486.jpg +Places365_test_00062497.jpg +Places365_test_00062538.jpg +Places365_test_00062552.jpg 
+Places365_test_00062589.jpg +Places365_test_00062615.jpg +Places365_test_00062622.jpg +Places365_test_00062648.jpg +Places365_test_00062668.jpg +Places365_test_00062691.jpg +Places365_test_00062695.jpg +Places365_test_00062708.jpg +Places365_test_00062739.jpg +Places365_test_00062745.jpg +Places365_test_00062769.jpg +Places365_test_00062773.jpg +Places365_test_00062774.jpg +Places365_test_00062787.jpg +Places365_test_00062793.jpg +Places365_test_00062800.jpg +Places365_test_00062806.jpg +Places365_test_00062810.jpg +Places365_test_00062817.jpg +Places365_test_00062823.jpg +Places365_test_00062852.jpg +Places365_test_00062865.jpg +Places365_test_00062873.jpg +Places365_test_00062875.jpg +Places365_test_00062881.jpg +Places365_test_00062886.jpg +Places365_test_00062888.jpg +Places365_test_00062890.jpg +Places365_test_00062907.jpg +Places365_test_00062912.jpg +Places365_test_00062915.jpg +Places365_test_00062917.jpg +Places365_test_00062936.jpg +Places365_test_00062963.jpg +Places365_test_00062966.jpg +Places365_test_00062988.jpg +Places365_test_00062992.jpg +Places365_test_00062994.jpg +Places365_test_00063003.jpg +Places365_test_00063012.jpg +Places365_test_00063018.jpg +Places365_test_00063021.jpg +Places365_test_00063040.jpg +Places365_test_00063056.jpg +Places365_test_00063059.jpg +Places365_test_00063076.jpg +Places365_test_00063095.jpg +Places365_test_00063117.jpg +Places365_test_00063129.jpg +Places365_test_00063132.jpg +Places365_test_00063148.jpg +Places365_test_00063150.jpg +Places365_test_00063158.jpg +Places365_test_00063181.jpg +Places365_test_00063187.jpg +Places365_test_00063194.jpg +Places365_test_00063196.jpg +Places365_test_00063215.jpg +Places365_test_00063219.jpg +Places365_test_00063232.jpg +Places365_test_00063236.jpg +Places365_test_00063239.jpg +Places365_test_00063243.jpg +Places365_test_00063244.jpg +Places365_test_00063246.jpg +Places365_test_00063257.jpg +Places365_test_00063261.jpg +Places365_test_00063264.jpg +Places365_test_00063275.jpg +Places365_test_00063279.jpg +Places365_test_00063280.jpg +Places365_test_00063310.jpg +Places365_test_00063311.jpg +Places365_test_00063325.jpg +Places365_test_00063332.jpg +Places365_test_00063340.jpg +Places365_test_00063346.jpg +Places365_test_00063353.jpg +Places365_test_00063355.jpg +Places365_test_00063369.jpg +Places365_test_00063377.jpg +Places365_test_00063394.jpg +Places365_test_00063399.jpg +Places365_test_00063407.jpg +Places365_test_00063408.jpg +Places365_test_00063409.jpg +Places365_test_00063444.jpg +Places365_test_00063449.jpg +Places365_test_00063452.jpg +Places365_test_00063465.jpg +Places365_test_00063471.jpg +Places365_test_00063483.jpg +Places365_test_00063492.jpg +Places365_test_00063497.jpg +Places365_test_00063501.jpg +Places365_test_00063516.jpg +Places365_test_00063554.jpg +Places365_test_00063565.jpg +Places365_test_00063580.jpg +Places365_test_00063581.jpg +Places365_test_00063585.jpg +Places365_test_00063588.jpg +Places365_test_00063598.jpg +Places365_test_00063606.jpg +Places365_test_00063610.jpg +Places365_test_00063615.jpg +Places365_test_00063618.jpg +Places365_test_00063637.jpg +Places365_test_00063644.jpg +Places365_test_00063661.jpg +Places365_test_00063662.jpg +Places365_test_00063681.jpg +Places365_test_00063707.jpg +Places365_test_00063724.jpg +Places365_test_00063731.jpg +Places365_test_00063734.jpg +Places365_test_00063822.jpg +Places365_test_00063841.jpg +Places365_test_00063848.jpg +Places365_test_00063861.jpg +Places365_test_00063879.jpg +Places365_test_00063886.jpg 
+Places365_test_00063902.jpg +Places365_test_00063908.jpg +Places365_test_00063924.jpg +Places365_test_00063926.jpg +Places365_test_00063951.jpg +Places365_test_00063959.jpg +Places365_test_00063963.jpg +Places365_test_00063969.jpg +Places365_test_00063970.jpg +Places365_test_00063976.jpg +Places365_test_00063980.jpg +Places365_test_00063986.jpg +Places365_test_00063992.jpg +Places365_test_00064001.jpg +Places365_test_00064008.jpg +Places365_test_00064015.jpg +Places365_test_00064018.jpg +Places365_test_00064022.jpg +Places365_test_00064033.jpg +Places365_test_00064034.jpg +Places365_test_00064068.jpg +Places365_test_00064080.jpg +Places365_test_00064083.jpg +Places365_test_00064100.jpg +Places365_test_00064105.jpg +Places365_test_00064108.jpg +Places365_test_00064119.jpg +Places365_test_00064130.jpg +Places365_test_00064151.jpg +Places365_test_00064158.jpg +Places365_test_00064167.jpg +Places365_test_00064177.jpg +Places365_test_00064184.jpg +Places365_test_00064192.jpg +Places365_test_00064196.jpg +Places365_test_00064199.jpg +Places365_test_00064204.jpg +Places365_test_00064209.jpg +Places365_test_00064214.jpg +Places365_test_00064219.jpg +Places365_test_00064222.jpg +Places365_test_00064231.jpg +Places365_test_00064242.jpg +Places365_test_00064245.jpg +Places365_test_00064262.jpg +Places365_test_00064266.jpg +Places365_test_00064267.jpg +Places365_test_00064271.jpg +Places365_test_00064276.jpg +Places365_test_00064277.jpg +Places365_test_00064302.jpg +Places365_test_00064303.jpg +Places365_test_00064311.jpg +Places365_test_00064322.jpg +Places365_test_00064331.jpg +Places365_test_00064339.jpg +Places365_test_00064350.jpg +Places365_test_00064351.jpg +Places365_test_00064357.jpg +Places365_test_00064369.jpg +Places365_test_00064388.jpg +Places365_test_00064395.jpg +Places365_test_00064399.jpg +Places365_test_00064439.jpg +Places365_test_00064454.jpg +Places365_test_00064480.jpg +Places365_test_00064483.jpg +Places365_test_00064494.jpg +Places365_test_00064510.jpg +Places365_test_00064519.jpg +Places365_test_00064530.jpg +Places365_test_00064533.jpg +Places365_test_00064547.jpg +Places365_test_00064577.jpg +Places365_test_00064581.jpg +Places365_test_00064590.jpg +Places365_test_00064595.jpg +Places365_test_00064605.jpg +Places365_test_00064608.jpg +Places365_test_00064615.jpg +Places365_test_00064630.jpg +Places365_test_00064631.jpg +Places365_test_00064634.jpg +Places365_test_00064644.jpg +Places365_test_00064660.jpg +Places365_test_00064672.jpg +Places365_test_00064675.jpg +Places365_test_00064681.jpg +Places365_test_00064716.jpg +Places365_test_00064722.jpg +Places365_test_00064734.jpg +Places365_test_00064742.jpg +Places365_test_00064743.jpg +Places365_test_00064748.jpg +Places365_test_00064752.jpg +Places365_test_00064755.jpg +Places365_test_00064777.jpg +Places365_test_00064786.jpg +Places365_test_00064787.jpg +Places365_test_00064789.jpg +Places365_test_00064798.jpg +Places365_test_00064804.jpg +Places365_test_00064806.jpg +Places365_test_00064815.jpg +Places365_test_00064817.jpg +Places365_test_00064826.jpg +Places365_test_00064858.jpg +Places365_test_00064897.jpg +Places365_test_00064920.jpg +Places365_test_00064921.jpg +Places365_test_00064933.jpg +Places365_test_00064935.jpg +Places365_test_00064947.jpg +Places365_test_00064972.jpg +Places365_test_00064980.jpg +Places365_test_00064992.jpg +Places365_test_00064994.jpg +Places365_test_00065006.jpg +Places365_test_00065015.jpg +Places365_test_00065020.jpg +Places365_test_00065032.jpg +Places365_test_00065039.jpg 
+Places365_test_00065056.jpg +Places365_test_00065106.jpg +Places365_test_00065110.jpg +Places365_test_00065123.jpg +Places365_test_00065148.jpg +Places365_test_00065149.jpg +Places365_test_00065153.jpg +Places365_test_00065156.jpg +Places365_test_00065177.jpg +Places365_test_00065187.jpg +Places365_test_00065196.jpg +Places365_test_00065208.jpg +Places365_test_00065214.jpg +Places365_test_00065216.jpg +Places365_test_00065225.jpg +Places365_test_00065237.jpg +Places365_test_00065248.jpg +Places365_test_00065253.jpg +Places365_test_00065257.jpg +Places365_test_00065262.jpg +Places365_test_00065270.jpg +Places365_test_00065289.jpg +Places365_test_00065301.jpg +Places365_test_00065302.jpg +Places365_test_00065303.jpg +Places365_test_00065335.jpg +Places365_test_00065349.jpg +Places365_test_00065380.jpg +Places365_test_00065385.jpg +Places365_test_00065393.jpg +Places365_test_00065400.jpg +Places365_test_00065402.jpg +Places365_test_00065403.jpg +Places365_test_00065419.jpg +Places365_test_00065435.jpg +Places365_test_00065436.jpg +Places365_test_00065457.jpg +Places365_test_00065474.jpg +Places365_test_00065475.jpg +Places365_test_00065482.jpg +Places365_test_00065500.jpg +Places365_test_00065507.jpg +Places365_test_00065524.jpg +Places365_test_00065529.jpg +Places365_test_00065536.jpg +Places365_test_00065543.jpg +Places365_test_00065560.jpg +Places365_test_00065566.jpg +Places365_test_00065577.jpg +Places365_test_00065580.jpg +Places365_test_00065587.jpg +Places365_test_00065596.jpg +Places365_test_00065598.jpg +Places365_test_00065615.jpg +Places365_test_00065630.jpg +Places365_test_00065638.jpg +Places365_test_00065642.jpg +Places365_test_00065656.jpg +Places365_test_00065657.jpg +Places365_test_00065688.jpg +Places365_test_00065693.jpg +Places365_test_00065694.jpg +Places365_test_00065705.jpg +Places365_test_00065721.jpg +Places365_test_00065724.jpg +Places365_test_00065725.jpg +Places365_test_00065731.jpg +Places365_test_00065755.jpg +Places365_test_00065762.jpg +Places365_test_00065771.jpg +Places365_test_00065779.jpg +Places365_test_00065785.jpg +Places365_test_00065799.jpg +Places365_test_00065803.jpg +Places365_test_00065811.jpg +Places365_test_00065813.jpg +Places365_test_00065818.jpg +Places365_test_00065822.jpg +Places365_test_00065874.jpg +Places365_test_00065887.jpg +Places365_test_00065896.jpg +Places365_test_00065931.jpg +Places365_test_00065933.jpg +Places365_test_00065960.jpg +Places365_test_00065971.jpg +Places365_test_00065990.jpg +Places365_test_00065992.jpg +Places365_test_00066010.jpg +Places365_test_00066040.jpg +Places365_test_00066041.jpg +Places365_test_00066048.jpg +Places365_test_00066060.jpg +Places365_test_00066062.jpg +Places365_test_00066070.jpg +Places365_test_00066101.jpg +Places365_test_00066120.jpg +Places365_test_00066125.jpg +Places365_test_00066129.jpg +Places365_test_00066139.jpg +Places365_test_00066145.jpg +Places365_test_00066173.jpg +Places365_test_00066183.jpg +Places365_test_00066203.jpg +Places365_test_00066213.jpg +Places365_test_00066227.jpg +Places365_test_00066238.jpg +Places365_test_00066246.jpg +Places365_test_00066250.jpg +Places365_test_00066263.jpg +Places365_test_00066269.jpg +Places365_test_00066274.jpg +Places365_test_00066277.jpg +Places365_test_00066289.jpg +Places365_test_00066290.jpg +Places365_test_00066291.jpg +Places365_test_00066299.jpg +Places365_test_00066306.jpg +Places365_test_00066307.jpg +Places365_test_00066329.jpg +Places365_test_00066330.jpg +Places365_test_00066341.jpg +Places365_test_00066346.jpg 
+Places365_test_00066355.jpg +Places365_test_00066366.jpg +Places365_test_00066384.jpg +Places365_test_00066396.jpg +Places365_test_00066411.jpg +Places365_test_00066414.jpg +Places365_test_00066419.jpg +Places365_test_00066423.jpg +Places365_test_00066431.jpg +Places365_test_00066433.jpg +Places365_test_00066437.jpg +Places365_test_00066447.jpg +Places365_test_00066461.jpg +Places365_test_00066463.jpg +Places365_test_00066498.jpg +Places365_test_00066502.jpg +Places365_test_00066506.jpg +Places365_test_00066512.jpg +Places365_test_00066522.jpg +Places365_test_00066523.jpg +Places365_test_00066525.jpg +Places365_test_00066536.jpg +Places365_test_00066558.jpg +Places365_test_00066571.jpg +Places365_test_00066576.jpg +Places365_test_00066588.jpg +Places365_test_00066592.jpg +Places365_test_00066617.jpg +Places365_test_00066637.jpg +Places365_test_00066639.jpg +Places365_test_00066644.jpg +Places365_test_00066681.jpg +Places365_test_00066690.jpg +Places365_test_00066713.jpg +Places365_test_00066744.jpg +Places365_test_00066764.jpg +Places365_test_00066780.jpg +Places365_test_00066806.jpg +Places365_test_00066811.jpg +Places365_test_00066814.jpg +Places365_test_00066817.jpg +Places365_test_00066820.jpg +Places365_test_00066832.jpg +Places365_test_00066844.jpg +Places365_test_00066859.jpg +Places365_test_00066867.jpg +Places365_test_00066879.jpg +Places365_test_00066890.jpg +Places365_test_00066929.jpg +Places365_test_00066931.jpg +Places365_test_00066936.jpg +Places365_test_00066941.jpg +Places365_test_00066944.jpg +Places365_test_00066954.jpg +Places365_test_00066961.jpg +Places365_test_00066968.jpg +Places365_test_00066977.jpg +Places365_test_00066989.jpg +Places365_test_00067003.jpg +Places365_test_00067016.jpg +Places365_test_00067041.jpg +Places365_test_00067044.jpg +Places365_test_00067062.jpg +Places365_test_00067091.jpg +Places365_test_00067095.jpg +Places365_test_00067102.jpg +Places365_test_00067109.jpg +Places365_test_00067111.jpg +Places365_test_00067119.jpg +Places365_test_00067129.jpg +Places365_test_00067141.jpg +Places365_test_00067146.jpg +Places365_test_00067171.jpg +Places365_test_00067177.jpg +Places365_test_00067181.jpg +Places365_test_00067191.jpg +Places365_test_00067197.jpg +Places365_test_00067242.jpg +Places365_test_00067254.jpg +Places365_test_00067296.jpg +Places365_test_00067298.jpg +Places365_test_00067311.jpg +Places365_test_00067314.jpg +Places365_test_00067321.jpg +Places365_test_00067334.jpg +Places365_test_00067361.jpg +Places365_test_00067362.jpg +Places365_test_00067372.jpg +Places365_test_00067373.jpg +Places365_test_00067386.jpg +Places365_test_00067390.jpg +Places365_test_00067407.jpg +Places365_test_00067434.jpg +Places365_test_00067439.jpg +Places365_test_00067451.jpg +Places365_test_00067463.jpg +Places365_test_00067466.jpg +Places365_test_00067476.jpg +Places365_test_00067484.jpg +Places365_test_00067517.jpg +Places365_test_00067537.jpg +Places365_test_00067552.jpg +Places365_test_00067559.jpg +Places365_test_00067561.jpg +Places365_test_00067593.jpg +Places365_test_00067630.jpg +Places365_test_00067634.jpg +Places365_test_00067637.jpg +Places365_test_00067640.jpg +Places365_test_00067653.jpg +Places365_test_00067654.jpg +Places365_test_00067665.jpg +Places365_test_00067673.jpg +Places365_test_00067674.jpg +Places365_test_00067682.jpg +Places365_test_00067704.jpg +Places365_test_00067710.jpg +Places365_test_00067712.jpg +Places365_test_00067716.jpg +Places365_test_00067727.jpg +Places365_test_00067730.jpg +Places365_test_00067749.jpg 
+Places365_test_00067752.jpg +Places365_test_00067775.jpg +Places365_test_00067831.jpg +Places365_test_00067856.jpg +Places365_test_00067876.jpg +Places365_test_00067890.jpg +Places365_test_00067895.jpg +Places365_test_00067909.jpg +Places365_test_00067911.jpg +Places365_test_00067920.jpg +Places365_test_00067927.jpg +Places365_test_00067930.jpg +Places365_test_00067945.jpg +Places365_test_00067951.jpg +Places365_test_00067966.jpg +Places365_test_00067967.jpg +Places365_test_00067968.jpg +Places365_test_00067973.jpg +Places365_test_00067980.jpg +Places365_test_00067996.jpg +Places365_test_00068000.jpg +Places365_test_00068023.jpg +Places365_test_00068030.jpg +Places365_test_00068031.jpg +Places365_test_00068033.jpg +Places365_test_00068043.jpg +Places365_test_00068062.jpg +Places365_test_00068073.jpg +Places365_test_00068079.jpg +Places365_test_00068091.jpg +Places365_test_00068093.jpg +Places365_test_00068099.jpg +Places365_test_00068112.jpg +Places365_test_00068113.jpg +Places365_test_00068158.jpg +Places365_test_00068161.jpg +Places365_test_00068171.jpg +Places365_test_00068176.jpg +Places365_test_00068180.jpg +Places365_test_00068182.jpg +Places365_test_00068198.jpg +Places365_test_00068207.jpg +Places365_test_00068229.jpg +Places365_test_00068247.jpg +Places365_test_00068249.jpg +Places365_test_00068277.jpg +Places365_test_00068305.jpg +Places365_test_00068327.jpg +Places365_test_00068331.jpg +Places365_test_00068333.jpg +Places365_test_00068356.jpg +Places365_test_00068358.jpg +Places365_test_00068381.jpg +Places365_test_00068413.jpg +Places365_test_00068423.jpg +Places365_test_00068429.jpg +Places365_test_00068436.jpg +Places365_test_00068483.jpg +Places365_test_00068502.jpg +Places365_test_00068515.jpg +Places365_test_00068520.jpg +Places365_test_00068521.jpg +Places365_test_00068537.jpg +Places365_test_00068538.jpg +Places365_test_00068550.jpg +Places365_test_00068552.jpg +Places365_test_00068577.jpg +Places365_test_00068610.jpg +Places365_test_00068611.jpg +Places365_test_00068612.jpg +Places365_test_00068616.jpg +Places365_test_00068640.jpg +Places365_test_00068656.jpg +Places365_test_00068666.jpg +Places365_test_00068669.jpg +Places365_test_00068684.jpg +Places365_test_00068688.jpg +Places365_test_00068697.jpg +Places365_test_00068700.jpg +Places365_test_00068709.jpg +Places365_test_00068738.jpg +Places365_test_00068771.jpg +Places365_test_00068776.jpg +Places365_test_00068777.jpg +Places365_test_00068811.jpg +Places365_test_00068823.jpg +Places365_test_00068827.jpg +Places365_test_00068830.jpg +Places365_test_00068843.jpg +Places365_test_00068849.jpg +Places365_test_00068851.jpg +Places365_test_00068856.jpg +Places365_test_00068862.jpg +Places365_test_00068868.jpg +Places365_test_00068870.jpg +Places365_test_00068877.jpg +Places365_test_00068900.jpg +Places365_test_00068924.jpg +Places365_test_00068974.jpg +Places365_test_00068976.jpg +Places365_test_00068981.jpg +Places365_test_00069012.jpg +Places365_test_00069013.jpg +Places365_test_00069036.jpg +Places365_test_00069079.jpg +Places365_test_00069082.jpg +Places365_test_00069085.jpg +Places365_test_00069090.jpg +Places365_test_00069093.jpg +Places365_test_00069106.jpg +Places365_test_00069113.jpg +Places365_test_00069115.jpg +Places365_test_00069126.jpg +Places365_test_00069132.jpg +Places365_test_00069135.jpg +Places365_test_00069140.jpg +Places365_test_00069183.jpg +Places365_test_00069253.jpg +Places365_test_00069256.jpg +Places365_test_00069267.jpg +Places365_test_00069288.jpg +Places365_test_00069289.jpg 
+Places365_test_00069290.jpg +Places365_test_00069305.jpg +Places365_test_00069313.jpg +Places365_test_00069328.jpg +Places365_test_00069336.jpg +Places365_test_00069376.jpg +Places365_test_00069377.jpg +Places365_test_00069388.jpg +Places365_test_00069390.jpg +Places365_test_00069394.jpg +Places365_test_00069405.jpg +Places365_test_00069408.jpg +Places365_test_00069420.jpg +Places365_test_00069434.jpg +Places365_test_00069452.jpg +Places365_test_00069472.jpg +Places365_test_00069487.jpg +Places365_test_00069497.jpg +Places365_test_00069498.jpg +Places365_test_00069502.jpg +Places365_test_00069509.jpg +Places365_test_00069527.jpg +Places365_test_00069528.jpg +Places365_test_00069536.jpg +Places365_test_00069544.jpg +Places365_test_00069546.jpg +Places365_test_00069553.jpg +Places365_test_00069564.jpg +Places365_test_00069578.jpg +Places365_test_00069585.jpg +Places365_test_00069586.jpg +Places365_test_00069587.jpg +Places365_test_00069600.jpg +Places365_test_00069603.jpg +Places365_test_00069605.jpg +Places365_test_00069613.jpg +Places365_test_00069615.jpg +Places365_test_00069626.jpg +Places365_test_00069630.jpg +Places365_test_00069644.jpg +Places365_test_00069675.jpg +Places365_test_00069680.jpg +Places365_test_00069688.jpg +Places365_test_00069694.jpg +Places365_test_00069699.jpg +Places365_test_00069706.jpg +Places365_test_00069735.jpg +Places365_test_00069768.jpg +Places365_test_00069776.jpg +Places365_test_00069796.jpg +Places365_test_00069807.jpg +Places365_test_00069812.jpg +Places365_test_00069818.jpg +Places365_test_00069831.jpg +Places365_test_00069859.jpg +Places365_test_00069866.jpg +Places365_test_00069868.jpg +Places365_test_00069893.jpg +Places365_test_00069923.jpg +Places365_test_00069925.jpg +Places365_test_00069929.jpg +Places365_test_00069941.jpg +Places365_test_00069942.jpg +Places365_test_00069945.jpg +Places365_test_00069953.jpg +Places365_test_00069975.jpg +Places365_test_00069986.jpg +Places365_test_00069992.jpg +Places365_test_00070014.jpg +Places365_test_00070016.jpg +Places365_test_00070046.jpg +Places365_test_00070053.jpg +Places365_test_00070055.jpg +Places365_test_00070056.jpg +Places365_test_00070089.jpg +Places365_test_00070093.jpg +Places365_test_00070100.jpg +Places365_test_00070106.jpg +Places365_test_00070107.jpg +Places365_test_00070113.jpg +Places365_test_00070117.jpg +Places365_test_00070137.jpg +Places365_test_00070146.jpg +Places365_test_00070149.jpg +Places365_test_00070181.jpg +Places365_test_00070199.jpg +Places365_test_00070213.jpg +Places365_test_00070216.jpg +Places365_test_00070219.jpg +Places365_test_00070254.jpg +Places365_test_00070261.jpg +Places365_test_00070284.jpg +Places365_test_00070300.jpg +Places365_test_00070307.jpg +Places365_test_00070319.jpg +Places365_test_00070325.jpg +Places365_test_00070347.jpg +Places365_test_00070366.jpg +Places365_test_00070374.jpg +Places365_test_00070397.jpg +Places365_test_00070398.jpg +Places365_test_00070409.jpg +Places365_test_00070411.jpg +Places365_test_00070412.jpg +Places365_test_00070438.jpg +Places365_test_00070445.jpg +Places365_test_00070448.jpg +Places365_test_00070456.jpg +Places365_test_00070472.jpg +Places365_test_00070473.jpg +Places365_test_00070483.jpg +Places365_test_00070532.jpg +Places365_test_00070543.jpg +Places365_test_00070554.jpg +Places365_test_00070555.jpg +Places365_test_00070562.jpg +Places365_test_00070579.jpg +Places365_test_00070584.jpg +Places365_test_00070600.jpg +Places365_test_00070616.jpg +Places365_test_00070636.jpg +Places365_test_00070669.jpg 
+Places365_test_00070680.jpg +Places365_test_00070681.jpg +Places365_test_00070685.jpg +Places365_test_00070712.jpg +Places365_test_00070714.jpg +Places365_test_00070715.jpg +Places365_test_00070717.jpg +Places365_test_00070732.jpg +Places365_test_00070738.jpg +Places365_test_00070748.jpg +Places365_test_00070770.jpg +Places365_test_00070777.jpg +Places365_test_00070778.jpg +Places365_test_00070779.jpg +Places365_test_00070783.jpg +Places365_test_00070803.jpg +Places365_test_00070815.jpg +Places365_test_00070818.jpg +Places365_test_00070824.jpg +Places365_test_00070839.jpg +Places365_test_00070844.jpg +Places365_test_00070864.jpg +Places365_test_00070874.jpg +Places365_test_00070875.jpg +Places365_test_00070886.jpg +Places365_test_00070949.jpg +Places365_test_00070961.jpg +Places365_test_00070967.jpg +Places365_test_00070968.jpg +Places365_test_00070972.jpg +Places365_test_00070989.jpg +Places365_test_00070990.jpg +Places365_test_00070997.jpg +Places365_test_00071013.jpg +Places365_test_00071031.jpg +Places365_test_00071032.jpg +Places365_test_00071038.jpg +Places365_test_00071040.jpg +Places365_test_00071046.jpg +Places365_test_00071051.jpg +Places365_test_00071058.jpg +Places365_test_00071089.jpg +Places365_test_00071095.jpg +Places365_test_00071097.jpg +Places365_test_00071101.jpg +Places365_test_00071113.jpg +Places365_test_00071121.jpg +Places365_test_00071126.jpg +Places365_test_00071128.jpg +Places365_test_00071140.jpg +Places365_test_00071152.jpg +Places365_test_00071155.jpg +Places365_test_00071158.jpg +Places365_test_00071172.jpg +Places365_test_00071174.jpg +Places365_test_00071180.jpg +Places365_test_00071187.jpg +Places365_test_00071190.jpg +Places365_test_00071222.jpg +Places365_test_00071226.jpg +Places365_test_00071232.jpg +Places365_test_00071234.jpg +Places365_test_00071242.jpg +Places365_test_00071249.jpg +Places365_test_00071254.jpg +Places365_test_00071266.jpg +Places365_test_00071284.jpg +Places365_test_00071299.jpg +Places365_test_00071301.jpg +Places365_test_00071303.jpg +Places365_test_00071312.jpg +Places365_test_00071324.jpg +Places365_test_00071337.jpg +Places365_test_00071338.jpg +Places365_test_00071341.jpg +Places365_test_00071344.jpg +Places365_test_00071350.jpg +Places365_test_00071356.jpg +Places365_test_00071362.jpg +Places365_test_00071370.jpg +Places365_test_00071378.jpg +Places365_test_00071403.jpg +Places365_test_00071412.jpg +Places365_test_00071418.jpg +Places365_test_00071433.jpg +Places365_test_00071437.jpg +Places365_test_00071451.jpg +Places365_test_00071481.jpg +Places365_test_00071485.jpg +Places365_test_00071496.jpg +Places365_test_00071507.jpg +Places365_test_00071523.jpg +Places365_test_00071535.jpg +Places365_test_00071538.jpg +Places365_test_00071574.jpg +Places365_test_00071575.jpg +Places365_test_00071593.jpg +Places365_test_00071595.jpg +Places365_test_00071597.jpg +Places365_test_00071598.jpg +Places365_test_00071607.jpg +Places365_test_00071625.jpg +Places365_test_00071673.jpg +Places365_test_00071682.jpg +Places365_test_00071703.jpg +Places365_test_00071705.jpg +Places365_test_00071721.jpg +Places365_test_00071725.jpg +Places365_test_00071732.jpg +Places365_test_00071748.jpg +Places365_test_00071749.jpg +Places365_test_00071751.jpg +Places365_test_00071756.jpg +Places365_test_00071765.jpg +Places365_test_00071778.jpg +Places365_test_00071789.jpg +Places365_test_00071808.jpg +Places365_test_00071812.jpg +Places365_test_00071822.jpg +Places365_test_00071830.jpg +Places365_test_00071838.jpg +Places365_test_00071840.jpg 
+Places365_test_00071846.jpg +Places365_test_00071851.jpg +Places365_test_00071862.jpg +Places365_test_00071872.jpg +Places365_test_00071887.jpg +Places365_test_00071889.jpg +Places365_test_00071909.jpg +Places365_test_00071918.jpg +Places365_test_00071931.jpg +Places365_test_00071961.jpg +Places365_test_00071966.jpg +Places365_test_00071982.jpg +Places365_test_00071988.jpg +Places365_test_00072003.jpg +Places365_test_00072010.jpg +Places365_test_00072014.jpg +Places365_test_00072016.jpg +Places365_test_00072026.jpg +Places365_test_00072027.jpg +Places365_test_00072041.jpg +Places365_test_00072059.jpg +Places365_test_00072064.jpg +Places365_test_00072077.jpg +Places365_test_00072081.jpg +Places365_test_00072086.jpg +Places365_test_00072093.jpg +Places365_test_00072106.jpg +Places365_test_00072108.jpg +Places365_test_00072119.jpg +Places365_test_00072122.jpg +Places365_test_00072149.jpg +Places365_test_00072172.jpg +Places365_test_00072182.jpg +Places365_test_00072185.jpg +Places365_test_00072189.jpg +Places365_test_00072191.jpg +Places365_test_00072220.jpg +Places365_test_00072231.jpg +Places365_test_00072248.jpg +Places365_test_00072285.jpg +Places365_test_00072312.jpg +Places365_test_00072323.jpg +Places365_test_00072324.jpg +Places365_test_00072326.jpg +Places365_test_00072333.jpg +Places365_test_00072357.jpg +Places365_test_00072380.jpg +Places365_test_00072394.jpg +Places365_test_00072397.jpg +Places365_test_00072411.jpg +Places365_test_00072413.jpg +Places365_test_00072418.jpg +Places365_test_00072443.jpg +Places365_test_00072465.jpg +Places365_test_00072467.jpg +Places365_test_00072498.jpg +Places365_test_00072526.jpg +Places365_test_00072538.jpg +Places365_test_00072542.jpg +Places365_test_00072562.jpg +Places365_test_00072565.jpg +Places365_test_00072576.jpg +Places365_test_00072578.jpg +Places365_test_00072585.jpg +Places365_test_00072621.jpg +Places365_test_00072624.jpg +Places365_test_00072628.jpg +Places365_test_00072647.jpg +Places365_test_00072648.jpg +Places365_test_00072656.jpg +Places365_test_00072662.jpg +Places365_test_00072675.jpg +Places365_test_00072692.jpg +Places365_test_00072700.jpg +Places365_test_00072723.jpg +Places365_test_00072732.jpg +Places365_test_00072749.jpg +Places365_test_00072758.jpg +Places365_test_00072782.jpg +Places365_test_00072783.jpg +Places365_test_00072787.jpg +Places365_test_00072793.jpg +Places365_test_00072796.jpg +Places365_test_00072797.jpg +Places365_test_00072804.jpg +Places365_test_00072807.jpg +Places365_test_00072808.jpg +Places365_test_00072823.jpg +Places365_test_00072825.jpg +Places365_test_00072847.jpg +Places365_test_00072865.jpg +Places365_test_00072866.jpg +Places365_test_00072868.jpg +Places365_test_00072874.jpg +Places365_test_00072877.jpg +Places365_test_00072879.jpg +Places365_test_00072880.jpg +Places365_test_00072887.jpg +Places365_test_00072919.jpg +Places365_test_00072955.jpg +Places365_test_00072964.jpg +Places365_test_00072968.jpg +Places365_test_00072971.jpg +Places365_test_00072976.jpg +Places365_test_00073006.jpg +Places365_test_00073026.jpg +Places365_test_00073030.jpg +Places365_test_00073040.jpg +Places365_test_00073051.jpg +Places365_test_00073057.jpg +Places365_test_00073081.jpg +Places365_test_00073098.jpg +Places365_test_00073107.jpg +Places365_test_00073118.jpg +Places365_test_00073121.jpg +Places365_test_00073133.jpg +Places365_test_00073134.jpg +Places365_test_00073135.jpg +Places365_test_00073148.jpg +Places365_test_00073151.jpg +Places365_test_00073161.jpg +Places365_test_00073175.jpg 
+Places365_test_00073187.jpg +Places365_test_00073213.jpg +Places365_test_00073220.jpg +Places365_test_00073223.jpg +Places365_test_00073239.jpg +Places365_test_00073259.jpg +Places365_test_00073262.jpg +Places365_test_00073290.jpg +Places365_test_00073298.jpg +Places365_test_00073299.jpg +Places365_test_00073303.jpg +Places365_test_00073320.jpg +Places365_test_00073329.jpg +Places365_test_00073334.jpg +Places365_test_00073343.jpg +Places365_test_00073354.jpg +Places365_test_00073378.jpg +Places365_test_00073388.jpg +Places365_test_00073400.jpg +Places365_test_00073401.jpg +Places365_test_00073414.jpg +Places365_test_00073420.jpg +Places365_test_00073423.jpg +Places365_test_00073435.jpg +Places365_test_00073439.jpg +Places365_test_00073440.jpg +Places365_test_00073441.jpg +Places365_test_00073456.jpg +Places365_test_00073473.jpg +Places365_test_00073481.jpg +Places365_test_00073487.jpg +Places365_test_00073496.jpg +Places365_test_00073497.jpg +Places365_test_00073519.jpg +Places365_test_00073556.jpg +Places365_test_00073571.jpg +Places365_test_00073579.jpg +Places365_test_00073581.jpg +Places365_test_00073588.jpg +Places365_test_00073595.jpg +Places365_test_00073601.jpg +Places365_test_00073626.jpg +Places365_test_00073629.jpg +Places365_test_00073644.jpg +Places365_test_00073658.jpg +Places365_test_00073674.jpg +Places365_test_00073675.jpg +Places365_test_00073681.jpg +Places365_test_00073693.jpg +Places365_test_00073694.jpg +Places365_test_00073696.jpg +Places365_test_00073721.jpg +Places365_test_00073733.jpg +Places365_test_00073789.jpg +Places365_test_00073802.jpg +Places365_test_00073803.jpg +Places365_test_00073814.jpg +Places365_test_00073831.jpg +Places365_test_00073841.jpg +Places365_test_00073842.jpg +Places365_test_00073850.jpg +Places365_test_00073853.jpg +Places365_test_00073856.jpg +Places365_test_00073861.jpg +Places365_test_00073879.jpg +Places365_test_00073892.jpg +Places365_test_00073898.jpg +Places365_test_00073910.jpg +Places365_test_00073925.jpg +Places365_test_00073927.jpg +Places365_test_00073955.jpg +Places365_test_00073958.jpg +Places365_test_00073970.jpg +Places365_test_00073972.jpg +Places365_test_00073982.jpg +Places365_test_00073993.jpg +Places365_test_00073997.jpg +Places365_test_00073999.jpg +Places365_test_00074009.jpg +Places365_test_00074010.jpg +Places365_test_00074022.jpg +Places365_test_00074030.jpg +Places365_test_00074037.jpg +Places365_test_00074053.jpg +Places365_test_00074058.jpg +Places365_test_00074140.jpg +Places365_test_00074143.jpg +Places365_test_00074144.jpg +Places365_test_00074167.jpg +Places365_test_00074176.jpg +Places365_test_00074181.jpg +Places365_test_00074186.jpg +Places365_test_00074190.jpg +Places365_test_00074191.jpg +Places365_test_00074207.jpg +Places365_test_00074216.jpg +Places365_test_00074227.jpg +Places365_test_00074231.jpg +Places365_test_00074240.jpg +Places365_test_00074245.jpg +Places365_test_00074247.jpg +Places365_test_00074262.jpg +Places365_test_00074263.jpg +Places365_test_00074278.jpg +Places365_test_00074283.jpg +Places365_test_00074286.jpg +Places365_test_00074316.jpg +Places365_test_00074337.jpg +Places365_test_00074338.jpg +Places365_test_00074357.jpg +Places365_test_00074367.jpg +Places365_test_00074369.jpg +Places365_test_00074374.jpg +Places365_test_00074392.jpg +Places365_test_00074396.jpg +Places365_test_00074421.jpg +Places365_test_00074443.jpg +Places365_test_00074444.jpg +Places365_test_00074453.jpg +Places365_test_00074458.jpg +Places365_test_00074462.jpg +Places365_test_00074479.jpg 
+Places365_test_00074502.jpg +Places365_test_00074538.jpg +Places365_test_00074554.jpg +Places365_test_00074567.jpg +Places365_test_00074569.jpg +Places365_test_00074572.jpg +Places365_test_00074582.jpg +Places365_test_00074584.jpg +Places365_test_00074595.jpg +Places365_test_00074597.jpg +Places365_test_00074627.jpg +Places365_test_00074650.jpg +Places365_test_00074670.jpg +Places365_test_00074680.jpg +Places365_test_00074682.jpg +Places365_test_00074692.jpg +Places365_test_00074693.jpg +Places365_test_00074699.jpg +Places365_test_00074702.jpg +Places365_test_00074703.jpg +Places365_test_00074704.jpg +Places365_test_00074709.jpg +Places365_test_00074711.jpg +Places365_test_00074723.jpg +Places365_test_00074724.jpg +Places365_test_00074744.jpg +Places365_test_00074751.jpg +Places365_test_00074754.jpg +Places365_test_00074760.jpg +Places365_test_00074765.jpg +Places365_test_00074784.jpg +Places365_test_00074789.jpg +Places365_test_00074793.jpg +Places365_test_00074803.jpg +Places365_test_00074830.jpg +Places365_test_00074858.jpg +Places365_test_00074861.jpg +Places365_test_00074866.jpg +Places365_test_00074889.jpg +Places365_test_00074893.jpg +Places365_test_00074899.jpg +Places365_test_00074927.jpg +Places365_test_00074944.jpg +Places365_test_00074946.jpg +Places365_test_00074951.jpg +Places365_test_00074963.jpg +Places365_test_00074967.jpg +Places365_test_00074969.jpg +Places365_test_00075014.jpg +Places365_test_00075028.jpg +Places365_test_00075033.jpg +Places365_test_00075034.jpg +Places365_test_00075038.jpg +Places365_test_00075051.jpg +Places365_test_00075060.jpg +Places365_test_00075069.jpg +Places365_test_00075079.jpg +Places365_test_00075089.jpg +Places365_test_00075091.jpg +Places365_test_00075105.jpg +Places365_test_00075123.jpg +Places365_test_00075125.jpg +Places365_test_00075127.jpg +Places365_test_00075132.jpg +Places365_test_00075140.jpg +Places365_test_00075154.jpg +Places365_test_00075156.jpg +Places365_test_00075158.jpg +Places365_test_00075174.jpg +Places365_test_00075188.jpg +Places365_test_00075195.jpg +Places365_test_00075196.jpg +Places365_test_00075200.jpg +Places365_test_00075212.jpg +Places365_test_00075213.jpg +Places365_test_00075227.jpg +Places365_test_00075262.jpg +Places365_test_00075263.jpg +Places365_test_00075275.jpg +Places365_test_00075292.jpg +Places365_test_00075293.jpg +Places365_test_00075300.jpg +Places365_test_00075312.jpg +Places365_test_00075317.jpg +Places365_test_00075342.jpg +Places365_test_00075349.jpg +Places365_test_00075394.jpg +Places365_test_00075398.jpg +Places365_test_00075421.jpg +Places365_test_00075424.jpg +Places365_test_00075430.jpg +Places365_test_00075433.jpg +Places365_test_00075441.jpg +Places365_test_00075460.jpg +Places365_test_00075472.jpg +Places365_test_00075475.jpg +Places365_test_00075477.jpg +Places365_test_00075478.jpg +Places365_test_00075483.jpg +Places365_test_00075490.jpg +Places365_test_00075491.jpg +Places365_test_00075493.jpg +Places365_test_00075496.jpg +Places365_test_00075509.jpg +Places365_test_00075516.jpg +Places365_test_00075520.jpg +Places365_test_00075524.jpg +Places365_test_00075531.jpg +Places365_test_00075534.jpg +Places365_test_00075594.jpg +Places365_test_00075600.jpg +Places365_test_00075603.jpg +Places365_test_00075624.jpg +Places365_test_00075649.jpg +Places365_test_00075657.jpg +Places365_test_00075677.jpg +Places365_test_00075696.jpg +Places365_test_00075732.jpg +Places365_test_00075759.jpg +Places365_test_00075771.jpg +Places365_test_00075772.jpg +Places365_test_00075800.jpg 
+Places365_test_00075831.jpg +Places365_test_00075835.jpg +Places365_test_00075878.jpg +Places365_test_00075888.jpg +Places365_test_00075897.jpg +Places365_test_00075910.jpg +Places365_test_00075924.jpg +Places365_test_00075930.jpg +Places365_test_00075932.jpg +Places365_test_00075949.jpg +Places365_test_00075960.jpg +Places365_test_00075961.jpg +Places365_test_00075978.jpg +Places365_test_00075979.jpg +Places365_test_00075981.jpg +Places365_test_00076016.jpg +Places365_test_00076028.jpg +Places365_test_00076034.jpg +Places365_test_00076036.jpg +Places365_test_00076073.jpg +Places365_test_00076085.jpg +Places365_test_00076113.jpg +Places365_test_00076133.jpg +Places365_test_00076134.jpg +Places365_test_00076135.jpg +Places365_test_00076150.jpg +Places365_test_00076160.jpg +Places365_test_00076168.jpg +Places365_test_00076202.jpg +Places365_test_00076205.jpg +Places365_test_00076212.jpg +Places365_test_00076217.jpg +Places365_test_00076221.jpg +Places365_test_00076226.jpg +Places365_test_00076233.jpg +Places365_test_00076234.jpg +Places365_test_00076242.jpg +Places365_test_00076284.jpg +Places365_test_00076285.jpg +Places365_test_00076287.jpg +Places365_test_00076299.jpg +Places365_test_00076305.jpg +Places365_test_00076323.jpg +Places365_test_00076328.jpg +Places365_test_00076330.jpg +Places365_test_00076332.jpg +Places365_test_00076336.jpg +Places365_test_00076371.jpg +Places365_test_00076380.jpg +Places365_test_00076385.jpg +Places365_test_00076390.jpg +Places365_test_00076392.jpg +Places365_test_00076398.jpg +Places365_test_00076407.jpg +Places365_test_00076411.jpg +Places365_test_00076415.jpg +Places365_test_00076423.jpg +Places365_test_00076440.jpg +Places365_test_00076444.jpg +Places365_test_00076456.jpg +Places365_test_00076458.jpg +Places365_test_00076482.jpg +Places365_test_00076493.jpg +Places365_test_00076494.jpg +Places365_test_00076503.jpg +Places365_test_00076505.jpg +Places365_test_00076517.jpg +Places365_test_00076537.jpg +Places365_test_00076540.jpg +Places365_test_00076548.jpg +Places365_test_00076551.jpg +Places365_test_00076565.jpg +Places365_test_00076572.jpg +Places365_test_00076587.jpg +Places365_test_00076592.jpg +Places365_test_00076618.jpg +Places365_test_00076620.jpg +Places365_test_00076638.jpg +Places365_test_00076659.jpg +Places365_test_00076664.jpg +Places365_test_00076670.jpg +Places365_test_00076701.jpg +Places365_test_00076713.jpg +Places365_test_00076717.jpg +Places365_test_00076732.jpg +Places365_test_00076733.jpg +Places365_test_00076746.jpg +Places365_test_00076748.jpg +Places365_test_00076749.jpg +Places365_test_00076761.jpg +Places365_test_00076762.jpg +Places365_test_00076768.jpg +Places365_test_00076771.jpg +Places365_test_00076786.jpg +Places365_test_00076789.jpg +Places365_test_00076790.jpg +Places365_test_00076803.jpg +Places365_test_00076809.jpg +Places365_test_00076842.jpg +Places365_test_00076857.jpg +Places365_test_00076859.jpg +Places365_test_00076866.jpg +Places365_test_00076874.jpg +Places365_test_00076880.jpg +Places365_test_00076883.jpg +Places365_test_00076884.jpg +Places365_test_00076897.jpg +Places365_test_00076912.jpg +Places365_test_00076921.jpg +Places365_test_00076930.jpg +Places365_test_00076935.jpg +Places365_test_00076944.jpg +Places365_test_00076952.jpg +Places365_test_00076955.jpg +Places365_test_00076964.jpg +Places365_test_00076976.jpg +Places365_test_00076980.jpg +Places365_test_00076984.jpg +Places365_test_00077004.jpg +Places365_test_00077057.jpg +Places365_test_00077061.jpg +Places365_test_00077062.jpg 
+Places365_test_00077070.jpg +Places365_test_00077077.jpg +Places365_test_00077080.jpg +Places365_test_00077085.jpg +Places365_test_00077086.jpg +Places365_test_00077087.jpg +Places365_test_00077097.jpg +Places365_test_00077103.jpg +Places365_test_00077118.jpg +Places365_test_00077119.jpg +Places365_test_00077127.jpg +Places365_test_00077152.jpg +Places365_test_00077153.jpg +Places365_test_00077154.jpg +Places365_test_00077191.jpg +Places365_test_00077201.jpg +Places365_test_00077231.jpg +Places365_test_00077242.jpg +Places365_test_00077252.jpg +Places365_test_00077254.jpg +Places365_test_00077265.jpg +Places365_test_00077270.jpg +Places365_test_00077295.jpg +Places365_test_00077297.jpg +Places365_test_00077314.jpg +Places365_test_00077318.jpg +Places365_test_00077322.jpg +Places365_test_00077324.jpg +Places365_test_00077326.jpg +Places365_test_00077331.jpg +Places365_test_00077359.jpg +Places365_test_00077370.jpg +Places365_test_00077373.jpg +Places365_test_00077386.jpg +Places365_test_00077390.jpg +Places365_test_00077403.jpg +Places365_test_00077465.jpg +Places365_test_00077472.jpg +Places365_test_00077484.jpg +Places365_test_00077507.jpg +Places365_test_00077527.jpg +Places365_test_00077552.jpg +Places365_test_00077557.jpg +Places365_test_00077566.jpg +Places365_test_00077570.jpg +Places365_test_00077571.jpg +Places365_test_00077582.jpg +Places365_test_00077607.jpg +Places365_test_00077619.jpg +Places365_test_00077623.jpg +Places365_test_00077659.jpg +Places365_test_00077670.jpg +Places365_test_00077671.jpg +Places365_test_00077683.jpg +Places365_test_00077688.jpg +Places365_test_00077698.jpg +Places365_test_00077715.jpg +Places365_test_00077730.jpg +Places365_test_00077752.jpg +Places365_test_00077769.jpg +Places365_test_00077795.jpg +Places365_test_00077807.jpg +Places365_test_00077826.jpg +Places365_test_00077830.jpg +Places365_test_00077856.jpg +Places365_test_00077861.jpg +Places365_test_00077869.jpg +Places365_test_00077884.jpg +Places365_test_00077904.jpg +Places365_test_00077911.jpg +Places365_test_00077916.jpg +Places365_test_00077938.jpg +Places365_test_00077941.jpg +Places365_test_00077972.jpg +Places365_test_00077981.jpg +Places365_test_00077996.jpg +Places365_test_00077998.jpg +Places365_test_00078000.jpg +Places365_test_00078001.jpg +Places365_test_00078014.jpg +Places365_test_00078020.jpg +Places365_test_00078030.jpg +Places365_test_00078046.jpg +Places365_test_00078073.jpg +Places365_test_00078088.jpg +Places365_test_00078098.jpg +Places365_test_00078099.jpg +Places365_test_00078141.jpg +Places365_test_00078176.jpg +Places365_test_00078198.jpg +Places365_test_00078209.jpg +Places365_test_00078220.jpg +Places365_test_00078230.jpg +Places365_test_00078260.jpg +Places365_test_00078263.jpg +Places365_test_00078266.jpg +Places365_test_00078269.jpg +Places365_test_00078281.jpg +Places365_test_00078283.jpg +Places365_test_00078319.jpg +Places365_test_00078323.jpg +Places365_test_00078329.jpg +Places365_test_00078330.jpg +Places365_test_00078342.jpg +Places365_test_00078383.jpg +Places365_test_00078412.jpg +Places365_test_00078428.jpg +Places365_test_00078437.jpg +Places365_test_00078442.jpg +Places365_test_00078448.jpg +Places365_test_00078453.jpg +Places365_test_00078470.jpg +Places365_test_00078494.jpg +Places365_test_00078501.jpg +Places365_test_00078505.jpg +Places365_test_00078535.jpg +Places365_test_00078541.jpg +Places365_test_00078572.jpg +Places365_test_00078618.jpg +Places365_test_00078635.jpg +Places365_test_00078640.jpg +Places365_test_00078669.jpg 
+Places365_test_00078671.jpg +Places365_test_00078675.jpg +Places365_test_00078692.jpg +Places365_test_00078703.jpg +Places365_test_00078706.jpg +Places365_test_00078708.jpg +Places365_test_00078709.jpg +Places365_test_00078712.jpg +Places365_test_00078730.jpg +Places365_test_00078735.jpg +Places365_test_00078747.jpg +Places365_test_00078759.jpg +Places365_test_00078777.jpg +Places365_test_00078798.jpg +Places365_test_00078815.jpg +Places365_test_00078823.jpg +Places365_test_00078839.jpg +Places365_test_00078840.jpg +Places365_test_00078843.jpg +Places365_test_00078905.jpg +Places365_test_00078910.jpg +Places365_test_00078919.jpg +Places365_test_00078941.jpg +Places365_test_00078946.jpg +Places365_test_00078947.jpg +Places365_test_00078977.jpg +Places365_test_00078978.jpg +Places365_test_00078983.jpg +Places365_test_00078988.jpg +Places365_test_00078996.jpg +Places365_test_00079000.jpg +Places365_test_00079015.jpg +Places365_test_00079024.jpg +Places365_test_00079037.jpg +Places365_test_00079041.jpg +Places365_test_00079044.jpg +Places365_test_00079049.jpg +Places365_test_00079054.jpg +Places365_test_00079087.jpg +Places365_test_00079116.jpg +Places365_test_00079153.jpg +Places365_test_00079156.jpg +Places365_test_00079161.jpg +Places365_test_00079171.jpg +Places365_test_00079174.jpg +Places365_test_00079179.jpg +Places365_test_00079210.jpg +Places365_test_00079222.jpg +Places365_test_00079230.jpg +Places365_test_00079235.jpg +Places365_test_00079236.jpg +Places365_test_00079237.jpg +Places365_test_00079266.jpg +Places365_test_00079270.jpg +Places365_test_00079273.jpg +Places365_test_00079280.jpg +Places365_test_00079285.jpg +Places365_test_00079299.jpg +Places365_test_00079302.jpg +Places365_test_00079307.jpg +Places365_test_00079321.jpg +Places365_test_00079323.jpg +Places365_test_00079343.jpg +Places365_test_00079344.jpg +Places365_test_00079369.jpg +Places365_test_00079376.jpg +Places365_test_00079406.jpg +Places365_test_00079420.jpg +Places365_test_00079430.jpg +Places365_test_00079440.jpg +Places365_test_00079447.jpg +Places365_test_00079450.jpg +Places365_test_00079466.jpg +Places365_test_00079473.jpg +Places365_test_00079482.jpg +Places365_test_00079505.jpg +Places365_test_00079509.jpg +Places365_test_00079513.jpg +Places365_test_00079522.jpg +Places365_test_00079527.jpg +Places365_test_00079535.jpg +Places365_test_00079573.jpg +Places365_test_00079591.jpg +Places365_test_00079597.jpg +Places365_test_00079610.jpg +Places365_test_00079611.jpg +Places365_test_00079614.jpg +Places365_test_00079615.jpg +Places365_test_00079616.jpg +Places365_test_00079619.jpg +Places365_test_00079684.jpg +Places365_test_00079689.jpg +Places365_test_00079733.jpg +Places365_test_00079737.jpg +Places365_test_00079745.jpg +Places365_test_00079761.jpg +Places365_test_00079764.jpg +Places365_test_00079770.jpg +Places365_test_00079773.jpg +Places365_test_00079815.jpg +Places365_test_00079816.jpg +Places365_test_00079850.jpg +Places365_test_00079853.jpg +Places365_test_00079855.jpg +Places365_test_00079871.jpg +Places365_test_00079880.jpg +Places365_test_00079885.jpg +Places365_test_00079887.jpg +Places365_test_00079893.jpg +Places365_test_00079897.jpg +Places365_test_00079911.jpg +Places365_test_00079923.jpg +Places365_test_00079961.jpg +Places365_test_00079987.jpg +Places365_test_00080010.jpg +Places365_test_00080011.jpg +Places365_test_00080013.jpg +Places365_test_00080014.jpg +Places365_test_00080049.jpg +Places365_test_00080079.jpg +Places365_test_00080091.jpg +Places365_test_00080096.jpg 
+Places365_test_00080104.jpg +Places365_test_00080122.jpg +Places365_test_00080151.jpg +Places365_test_00080165.jpg +Places365_test_00080166.jpg +Places365_test_00080167.jpg +Places365_test_00080189.jpg +Places365_test_00080206.jpg +Places365_test_00080223.jpg +Places365_test_00080227.jpg +Places365_test_00080297.jpg +Places365_test_00080310.jpg +Places365_test_00080314.jpg +Places365_test_00080315.jpg +Places365_test_00080339.jpg +Places365_test_00080340.jpg +Places365_test_00080344.jpg +Places365_test_00080349.jpg +Places365_test_00080354.jpg +Places365_test_00080358.jpg +Places365_test_00080366.jpg +Places365_test_00080438.jpg +Places365_test_00080439.jpg +Places365_test_00080447.jpg +Places365_test_00080450.jpg +Places365_test_00080480.jpg +Places365_test_00080482.jpg +Places365_test_00080499.jpg +Places365_test_00080508.jpg +Places365_test_00080509.jpg +Places365_test_00080523.jpg +Places365_test_00080534.jpg +Places365_test_00080535.jpg +Places365_test_00080548.jpg +Places365_test_00080556.jpg +Places365_test_00080577.jpg +Places365_test_00080581.jpg +Places365_test_00080600.jpg +Places365_test_00080623.jpg +Places365_test_00080627.jpg +Places365_test_00080636.jpg +Places365_test_00080649.jpg +Places365_test_00080653.jpg +Places365_test_00080681.jpg +Places365_test_00080682.jpg +Places365_test_00080683.jpg +Places365_test_00080693.jpg +Places365_test_00080698.jpg +Places365_test_00080712.jpg +Places365_test_00080719.jpg +Places365_test_00080780.jpg +Places365_test_00080786.jpg +Places365_test_00080792.jpg +Places365_test_00080794.jpg +Places365_test_00080809.jpg +Places365_test_00080813.jpg +Places365_test_00080843.jpg +Places365_test_00080850.jpg +Places365_test_00080867.jpg +Places365_test_00080874.jpg +Places365_test_00080877.jpg +Places365_test_00080889.jpg +Places365_test_00080897.jpg +Places365_test_00080927.jpg +Places365_test_00080933.jpg +Places365_test_00080939.jpg +Places365_test_00080945.jpg +Places365_test_00080958.jpg +Places365_test_00080960.jpg +Places365_test_00080965.jpg +Places365_test_00080969.jpg +Places365_test_00080978.jpg +Places365_test_00080993.jpg +Places365_test_00081012.jpg +Places365_test_00081017.jpg +Places365_test_00081018.jpg +Places365_test_00081038.jpg +Places365_test_00081041.jpg +Places365_test_00081079.jpg +Places365_test_00081084.jpg +Places365_test_00081091.jpg +Places365_test_00081106.jpg +Places365_test_00081116.jpg +Places365_test_00081123.jpg +Places365_test_00081143.jpg +Places365_test_00081144.jpg +Places365_test_00081150.jpg +Places365_test_00081174.jpg +Places365_test_00081184.jpg +Places365_test_00081188.jpg +Places365_test_00081222.jpg +Places365_test_00081229.jpg +Places365_test_00081246.jpg +Places365_test_00081267.jpg +Places365_test_00081274.jpg +Places365_test_00081283.jpg +Places365_test_00081288.jpg +Places365_test_00081295.jpg +Places365_test_00081296.jpg +Places365_test_00081298.jpg +Places365_test_00081308.jpg +Places365_test_00081331.jpg +Places365_test_00081337.jpg +Places365_test_00081370.jpg +Places365_test_00081372.jpg +Places365_test_00081373.jpg +Places365_test_00081377.jpg +Places365_test_00081380.jpg +Places365_test_00081381.jpg +Places365_test_00081389.jpg +Places365_test_00081392.jpg +Places365_test_00081405.jpg +Places365_test_00081407.jpg +Places365_test_00081408.jpg +Places365_test_00081409.jpg +Places365_test_00081410.jpg +Places365_test_00081418.jpg +Places365_test_00081425.jpg +Places365_test_00081427.jpg +Places365_test_00081428.jpg +Places365_test_00081429.jpg +Places365_test_00081452.jpg 
+Places365_test_00081454.jpg +Places365_test_00081457.jpg +Places365_test_00081469.jpg +Places365_test_00081470.jpg +Places365_test_00081504.jpg +Places365_test_00081545.jpg +Places365_test_00081558.jpg +Places365_test_00081575.jpg +Places365_test_00081588.jpg +Places365_test_00081593.jpg +Places365_test_00081597.jpg +Places365_test_00081598.jpg +Places365_test_00081621.jpg +Places365_test_00081623.jpg +Places365_test_00081625.jpg +Places365_test_00081628.jpg +Places365_test_00081629.jpg +Places365_test_00081632.jpg +Places365_test_00081641.jpg +Places365_test_00081654.jpg +Places365_test_00081661.jpg +Places365_test_00081662.jpg +Places365_test_00081676.jpg +Places365_test_00081678.jpg +Places365_test_00081679.jpg +Places365_test_00081698.jpg +Places365_test_00081704.jpg +Places365_test_00081728.jpg +Places365_test_00081745.jpg +Places365_test_00081752.jpg +Places365_test_00081754.jpg +Places365_test_00081765.jpg +Places365_test_00081766.jpg +Places365_test_00081789.jpg +Places365_test_00081809.jpg +Places365_test_00081837.jpg +Places365_test_00081845.jpg +Places365_test_00081852.jpg +Places365_test_00081877.jpg +Places365_test_00081909.jpg +Places365_test_00081923.jpg +Places365_test_00081933.jpg +Places365_test_00081940.jpg +Places365_test_00081946.jpg +Places365_test_00081958.jpg +Places365_test_00081962.jpg +Places365_test_00081978.jpg +Places365_test_00082015.jpg +Places365_test_00082016.jpg +Places365_test_00082019.jpg +Places365_test_00082044.jpg +Places365_test_00082052.jpg +Places365_test_00082059.jpg +Places365_test_00082061.jpg +Places365_test_00082064.jpg +Places365_test_00082085.jpg +Places365_test_00082108.jpg +Places365_test_00082112.jpg +Places365_test_00082127.jpg +Places365_test_00082145.jpg +Places365_test_00082149.jpg +Places365_test_00082150.jpg +Places365_test_00082153.jpg +Places365_test_00082192.jpg +Places365_test_00082197.jpg +Places365_test_00082207.jpg +Places365_test_00082223.jpg +Places365_test_00082228.jpg +Places365_test_00082230.jpg +Places365_test_00082241.jpg +Places365_test_00082254.jpg +Places365_test_00082263.jpg +Places365_test_00082266.jpg +Places365_test_00082299.jpg +Places365_test_00082323.jpg +Places365_test_00082325.jpg +Places365_test_00082326.jpg +Places365_test_00082329.jpg +Places365_test_00082330.jpg +Places365_test_00082355.jpg +Places365_test_00082357.jpg +Places365_test_00082370.jpg +Places365_test_00082383.jpg +Places365_test_00082431.jpg +Places365_test_00082433.jpg +Places365_test_00082441.jpg +Places365_test_00082443.jpg +Places365_test_00082465.jpg +Places365_test_00082485.jpg +Places365_test_00082497.jpg +Places365_test_00082504.jpg +Places365_test_00082515.jpg +Places365_test_00082527.jpg +Places365_test_00082547.jpg +Places365_test_00082549.jpg +Places365_test_00082569.jpg +Places365_test_00082592.jpg +Places365_test_00082635.jpg +Places365_test_00082642.jpg +Places365_test_00082658.jpg +Places365_test_00082667.jpg +Places365_test_00082670.jpg +Places365_test_00082682.jpg +Places365_test_00082695.jpg +Places365_test_00082696.jpg +Places365_test_00082707.jpg +Places365_test_00082748.jpg +Places365_test_00082758.jpg +Places365_test_00082767.jpg +Places365_test_00082779.jpg +Places365_test_00082790.jpg +Places365_test_00082794.jpg +Places365_test_00082809.jpg +Places365_test_00082810.jpg +Places365_test_00082826.jpg +Places365_test_00082842.jpg +Places365_test_00082854.jpg +Places365_test_00082875.jpg +Places365_test_00082879.jpg +Places365_test_00082904.jpg +Places365_test_00082917.jpg +Places365_test_00082919.jpg 
+Places365_test_00082922.jpg +Places365_test_00082948.jpg +Places365_test_00082950.jpg +Places365_test_00082955.jpg +Places365_test_00082969.jpg +Places365_test_00082983.jpg +Places365_test_00082987.jpg +Places365_test_00082988.jpg +Places365_test_00083020.jpg +Places365_test_00083033.jpg +Places365_test_00083037.jpg +Places365_test_00083042.jpg +Places365_test_00083046.jpg +Places365_test_00083052.jpg +Places365_test_00083055.jpg +Places365_test_00083062.jpg +Places365_test_00083077.jpg +Places365_test_00083085.jpg +Places365_test_00083086.jpg +Places365_test_00083096.jpg +Places365_test_00083098.jpg +Places365_test_00083103.jpg +Places365_test_00083115.jpg +Places365_test_00083118.jpg +Places365_test_00083159.jpg +Places365_test_00083163.jpg +Places365_test_00083198.jpg +Places365_test_00083202.jpg +Places365_test_00083218.jpg +Places365_test_00083234.jpg +Places365_test_00083246.jpg +Places365_test_00083259.jpg +Places365_test_00083288.jpg +Places365_test_00083305.jpg +Places365_test_00083306.jpg +Places365_test_00083327.jpg +Places365_test_00083360.jpg +Places365_test_00083365.jpg +Places365_test_00083373.jpg +Places365_test_00083391.jpg +Places365_test_00083406.jpg +Places365_test_00083414.jpg +Places365_test_00083459.jpg +Places365_test_00083472.jpg +Places365_test_00083477.jpg +Places365_test_00083480.jpg +Places365_test_00083495.jpg +Places365_test_00083501.jpg +Places365_test_00083502.jpg +Places365_test_00083508.jpg +Places365_test_00083512.jpg +Places365_test_00083514.jpg +Places365_test_00083517.jpg +Places365_test_00083519.jpg +Places365_test_00083552.jpg +Places365_test_00083555.jpg +Places365_test_00083560.jpg +Places365_test_00083587.jpg +Places365_test_00083591.jpg +Places365_test_00083612.jpg +Places365_test_00083613.jpg +Places365_test_00083629.jpg +Places365_test_00083635.jpg +Places365_test_00083639.jpg +Places365_test_00083647.jpg +Places365_test_00083650.jpg +Places365_test_00083667.jpg +Places365_test_00083678.jpg +Places365_test_00083685.jpg +Places365_test_00083697.jpg +Places365_test_00083698.jpg +Places365_test_00083703.jpg +Places365_test_00083718.jpg +Places365_test_00083726.jpg +Places365_test_00083731.jpg +Places365_test_00083735.jpg +Places365_test_00083745.jpg +Places365_test_00083751.jpg +Places365_test_00083780.jpg +Places365_test_00083807.jpg +Places365_test_00083813.jpg +Places365_test_00083814.jpg +Places365_test_00083818.jpg +Places365_test_00083819.jpg +Places365_test_00083833.jpg +Places365_test_00083834.jpg +Places365_test_00083845.jpg +Places365_test_00083850.jpg +Places365_test_00083856.jpg +Places365_test_00083885.jpg +Places365_test_00083894.jpg +Places365_test_00083902.jpg +Places365_test_00083909.jpg +Places365_test_00083934.jpg +Places365_test_00083937.jpg +Places365_test_00083942.jpg +Places365_test_00083943.jpg +Places365_test_00083967.jpg +Places365_test_00083970.jpg +Places365_test_00083976.jpg +Places365_test_00083982.jpg +Places365_test_00083987.jpg +Places365_test_00083995.jpg +Places365_test_00084002.jpg +Places365_test_00084027.jpg +Places365_test_00084043.jpg +Places365_test_00084045.jpg +Places365_test_00084056.jpg +Places365_test_00084058.jpg +Places365_test_00084068.jpg +Places365_test_00084080.jpg +Places365_test_00084081.jpg +Places365_test_00084083.jpg +Places365_test_00084095.jpg +Places365_test_00084114.jpg +Places365_test_00084125.jpg +Places365_test_00084130.jpg +Places365_test_00084147.jpg +Places365_test_00084153.jpg +Places365_test_00084154.jpg +Places365_test_00084156.jpg +Places365_test_00084164.jpg 
+Places365_test_00084177.jpg +Places365_test_00084183.jpg +Places365_test_00084185.jpg +Places365_test_00084186.jpg +Places365_test_00084195.jpg +Places365_test_00084208.jpg +Places365_test_00084225.jpg +Places365_test_00084228.jpg +Places365_test_00084232.jpg +Places365_test_00084239.jpg +Places365_test_00084270.jpg +Places365_test_00084273.jpg +Places365_test_00084278.jpg +Places365_test_00084283.jpg +Places365_test_00084284.jpg +Places365_test_00084291.jpg +Places365_test_00084295.jpg +Places365_test_00084299.jpg +Places365_test_00084302.jpg +Places365_test_00084310.jpg +Places365_test_00084314.jpg +Places365_test_00084344.jpg +Places365_test_00084348.jpg +Places365_test_00084363.jpg +Places365_test_00084371.jpg +Places365_test_00084392.jpg +Places365_test_00084394.jpg +Places365_test_00084403.jpg +Places365_test_00084439.jpg +Places365_test_00084448.jpg +Places365_test_00084458.jpg +Places365_test_00084464.jpg +Places365_test_00084473.jpg +Places365_test_00084484.jpg +Places365_test_00084506.jpg +Places365_test_00084507.jpg +Places365_test_00084532.jpg +Places365_test_00084533.jpg +Places365_test_00084549.jpg +Places365_test_00084556.jpg +Places365_test_00084558.jpg +Places365_test_00084560.jpg +Places365_test_00084573.jpg +Places365_test_00084580.jpg +Places365_test_00084588.jpg +Places365_test_00084590.jpg +Places365_test_00084608.jpg +Places365_test_00084614.jpg +Places365_test_00084640.jpg +Places365_test_00084651.jpg +Places365_test_00084653.jpg +Places365_test_00084656.jpg +Places365_test_00084657.jpg +Places365_test_00084661.jpg +Places365_test_00084667.jpg +Places365_test_00084670.jpg +Places365_test_00084702.jpg +Places365_test_00084740.jpg +Places365_test_00084750.jpg +Places365_test_00084772.jpg +Places365_test_00084783.jpg +Places365_test_00084788.jpg +Places365_test_00084791.jpg +Places365_test_00084796.jpg +Places365_test_00084818.jpg +Places365_test_00084851.jpg +Places365_test_00084858.jpg +Places365_test_00084861.jpg +Places365_test_00084872.jpg +Places365_test_00084887.jpg +Places365_test_00084889.jpg +Places365_test_00084892.jpg +Places365_test_00084897.jpg +Places365_test_00084906.jpg +Places365_test_00084910.jpg +Places365_test_00084917.jpg +Places365_test_00084920.jpg +Places365_test_00084928.jpg +Places365_test_00084990.jpg +Places365_test_00084997.jpg +Places365_test_00085008.jpg +Places365_test_00085020.jpg +Places365_test_00085026.jpg +Places365_test_00085053.jpg +Places365_test_00085061.jpg +Places365_test_00085072.jpg +Places365_test_00085125.jpg +Places365_test_00085130.jpg +Places365_test_00085132.jpg +Places365_test_00085133.jpg +Places365_test_00085136.jpg +Places365_test_00085146.jpg +Places365_test_00085150.jpg +Places365_test_00085180.jpg +Places365_test_00085190.jpg +Places365_test_00085201.jpg +Places365_test_00085202.jpg +Places365_test_00085212.jpg +Places365_test_00085217.jpg +Places365_test_00085240.jpg +Places365_test_00085243.jpg +Places365_test_00085253.jpg +Places365_test_00085269.jpg +Places365_test_00085285.jpg +Places365_test_00085319.jpg +Places365_test_00085325.jpg +Places365_test_00085332.jpg +Places365_test_00085365.jpg +Places365_test_00085369.jpg +Places365_test_00085376.jpg +Places365_test_00085383.jpg +Places365_test_00085393.jpg +Places365_test_00085431.jpg +Places365_test_00085460.jpg +Places365_test_00085461.jpg +Places365_test_00085462.jpg +Places365_test_00085478.jpg +Places365_test_00085482.jpg +Places365_test_00085489.jpg +Places365_test_00085510.jpg +Places365_test_00085515.jpg +Places365_test_00085534.jpg 
+Places365_test_00085542.jpg +Places365_test_00085548.jpg +Places365_test_00085553.jpg +Places365_test_00085566.jpg +Places365_test_00085600.jpg +Places365_test_00085602.jpg +Places365_test_00085603.jpg +Places365_test_00085612.jpg +Places365_test_00085613.jpg +Places365_test_00085614.jpg +Places365_test_00085629.jpg +Places365_test_00085636.jpg +Places365_test_00085658.jpg +Places365_test_00085669.jpg +Places365_test_00085685.jpg +Places365_test_00085695.jpg +Places365_test_00085711.jpg +Places365_test_00085713.jpg +Places365_test_00085718.jpg +Places365_test_00085730.jpg +Places365_test_00085737.jpg +Places365_test_00085742.jpg +Places365_test_00085759.jpg +Places365_test_00085773.jpg +Places365_test_00085778.jpg +Places365_test_00085781.jpg +Places365_test_00085797.jpg +Places365_test_00085803.jpg +Places365_test_00085814.jpg +Places365_test_00085825.jpg +Places365_test_00085842.jpg +Places365_test_00085845.jpg +Places365_test_00085854.jpg +Places365_test_00085855.jpg +Places365_test_00085866.jpg +Places365_test_00085880.jpg +Places365_test_00085902.jpg +Places365_test_00085905.jpg +Places365_test_00085906.jpg +Places365_test_00085924.jpg +Places365_test_00085933.jpg +Places365_test_00085941.jpg +Places365_test_00085952.jpg +Places365_test_00085968.jpg +Places365_test_00085994.jpg +Places365_test_00085996.jpg +Places365_test_00086008.jpg +Places365_test_00086044.jpg +Places365_test_00086046.jpg +Places365_test_00086053.jpg +Places365_test_00086056.jpg +Places365_test_00086058.jpg +Places365_test_00086062.jpg +Places365_test_00086068.jpg +Places365_test_00086098.jpg +Places365_test_00086105.jpg +Places365_test_00086112.jpg +Places365_test_00086116.jpg +Places365_test_00086117.jpg +Places365_test_00086118.jpg +Places365_test_00086134.jpg +Places365_test_00086143.jpg +Places365_test_00086164.jpg +Places365_test_00086165.jpg +Places365_test_00086166.jpg +Places365_test_00086173.jpg +Places365_test_00086182.jpg +Places365_test_00086194.jpg +Places365_test_00086214.jpg +Places365_test_00086222.jpg +Places365_test_00086228.jpg +Places365_test_00086229.jpg +Places365_test_00086237.jpg +Places365_test_00086243.jpg +Places365_test_00086250.jpg +Places365_test_00086253.jpg +Places365_test_00086255.jpg +Places365_test_00086291.jpg +Places365_test_00086298.jpg +Places365_test_00086311.jpg +Places365_test_00086327.jpg +Places365_test_00086340.jpg +Places365_test_00086342.jpg +Places365_test_00086352.jpg +Places365_test_00086353.jpg +Places365_test_00086375.jpg +Places365_test_00086389.jpg +Places365_test_00086400.jpg +Places365_test_00086413.jpg +Places365_test_00086414.jpg +Places365_test_00086417.jpg +Places365_test_00086419.jpg +Places365_test_00086444.jpg +Places365_test_00086484.jpg +Places365_test_00086496.jpg +Places365_test_00086519.jpg +Places365_test_00086527.jpg +Places365_test_00086567.jpg +Places365_test_00086568.jpg +Places365_test_00086575.jpg +Places365_test_00086580.jpg +Places365_test_00086605.jpg +Places365_test_00086620.jpg +Places365_test_00086624.jpg +Places365_test_00086637.jpg +Places365_test_00086643.jpg +Places365_test_00086647.jpg +Places365_test_00086648.jpg +Places365_test_00086662.jpg +Places365_test_00086676.jpg +Places365_test_00086702.jpg +Places365_test_00086703.jpg +Places365_test_00086704.jpg +Places365_test_00086706.jpg +Places365_test_00086710.jpg +Places365_test_00086725.jpg +Places365_test_00086748.jpg +Places365_test_00086769.jpg +Places365_test_00086777.jpg +Places365_test_00086782.jpg +Places365_test_00086808.jpg +Places365_test_00086819.jpg 
+Places365_test_00086820.jpg +Places365_test_00086827.jpg +Places365_test_00086838.jpg +Places365_test_00086848.jpg +Places365_test_00086850.jpg +Places365_test_00086877.jpg +Places365_test_00086893.jpg +Places365_test_00086895.jpg +Places365_test_00086896.jpg +Places365_test_00086917.jpg +Places365_test_00086940.jpg +Places365_test_00086942.jpg +Places365_test_00086955.jpg +Places365_test_00086965.jpg +Places365_test_00086980.jpg +Places365_test_00086994.jpg +Places365_test_00087001.jpg +Places365_test_00087024.jpg +Places365_test_00087041.jpg +Places365_test_00087046.jpg +Places365_test_00087052.jpg +Places365_test_00087096.jpg +Places365_test_00087097.jpg +Places365_test_00087099.jpg +Places365_test_00087108.jpg +Places365_test_00087120.jpg +Places365_test_00087150.jpg +Places365_test_00087162.jpg +Places365_test_00087177.jpg +Places365_test_00087182.jpg +Places365_test_00087189.jpg +Places365_test_00087190.jpg +Places365_test_00087193.jpg +Places365_test_00087219.jpg +Places365_test_00087238.jpg +Places365_test_00087263.jpg +Places365_test_00087272.jpg +Places365_test_00087276.jpg +Places365_test_00087278.jpg +Places365_test_00087292.jpg +Places365_test_00087299.jpg +Places365_test_00087305.jpg +Places365_test_00087306.jpg +Places365_test_00087322.jpg +Places365_test_00087329.jpg +Places365_test_00087335.jpg +Places365_test_00087341.jpg +Places365_test_00087351.jpg +Places365_test_00087353.jpg +Places365_test_00087367.jpg +Places365_test_00087374.jpg +Places365_test_00087425.jpg +Places365_test_00087439.jpg +Places365_test_00087442.jpg +Places365_test_00087445.jpg +Places365_test_00087449.jpg +Places365_test_00087458.jpg +Places365_test_00087470.jpg +Places365_test_00087480.jpg +Places365_test_00087501.jpg +Places365_test_00087506.jpg +Places365_test_00087510.jpg +Places365_test_00087511.jpg +Places365_test_00087512.jpg +Places365_test_00087515.jpg +Places365_test_00087518.jpg +Places365_test_00087529.jpg +Places365_test_00087534.jpg +Places365_test_00087541.jpg +Places365_test_00087542.jpg +Places365_test_00087565.jpg +Places365_test_00087576.jpg +Places365_test_00087579.jpg +Places365_test_00087598.jpg +Places365_test_00087608.jpg +Places365_test_00087622.jpg +Places365_test_00087632.jpg +Places365_test_00087643.jpg +Places365_test_00087658.jpg +Places365_test_00087661.jpg +Places365_test_00087665.jpg +Places365_test_00087694.jpg +Places365_test_00087695.jpg +Places365_test_00087712.jpg +Places365_test_00087726.jpg +Places365_test_00087774.jpg +Places365_test_00087785.jpg +Places365_test_00087789.jpg +Places365_test_00087791.jpg +Places365_test_00087804.jpg +Places365_test_00087806.jpg +Places365_test_00087809.jpg +Places365_test_00087817.jpg +Places365_test_00087827.jpg +Places365_test_00087831.jpg +Places365_test_00087842.jpg +Places365_test_00087856.jpg +Places365_test_00087858.jpg +Places365_test_00087865.jpg +Places365_test_00087869.jpg +Places365_test_00087877.jpg +Places365_test_00087880.jpg +Places365_test_00087914.jpg +Places365_test_00087919.jpg +Places365_test_00087931.jpg +Places365_test_00087945.jpg +Places365_test_00087955.jpg +Places365_test_00087964.jpg +Places365_test_00087965.jpg +Places365_test_00087973.jpg +Places365_test_00088002.jpg +Places365_test_00088022.jpg +Places365_test_00088041.jpg +Places365_test_00088051.jpg +Places365_test_00088060.jpg +Places365_test_00088065.jpg +Places365_test_00088066.jpg +Places365_test_00088071.jpg +Places365_test_00088079.jpg +Places365_test_00088097.jpg +Places365_test_00088104.jpg +Places365_test_00088117.jpg 
+Places365_test_00088138.jpg +Places365_test_00088149.jpg +Places365_test_00088182.jpg +Places365_test_00088191.jpg +Places365_test_00088195.jpg +Places365_test_00088218.jpg +Places365_test_00088221.jpg +Places365_test_00088239.jpg +Places365_test_00088243.jpg +Places365_test_00088269.jpg +Places365_test_00088272.jpg +Places365_test_00088277.jpg +Places365_test_00088280.jpg +Places365_test_00088285.jpg +Places365_test_00088286.jpg +Places365_test_00088289.jpg +Places365_test_00088291.jpg +Places365_test_00088295.jpg +Places365_test_00088304.jpg +Places365_test_00088308.jpg +Places365_test_00088322.jpg +Places365_test_00088338.jpg +Places365_test_00088347.jpg +Places365_test_00088348.jpg +Places365_test_00088373.jpg +Places365_test_00088409.jpg +Places365_test_00088415.jpg +Places365_test_00088419.jpg +Places365_test_00088431.jpg +Places365_test_00088442.jpg +Places365_test_00088452.jpg +Places365_test_00088465.jpg +Places365_test_00088472.jpg +Places365_test_00088486.jpg +Places365_test_00088497.jpg +Places365_test_00088500.jpg +Places365_test_00088524.jpg +Places365_test_00088529.jpg +Places365_test_00088532.jpg +Places365_test_00088538.jpg +Places365_test_00088542.jpg +Places365_test_00088559.jpg +Places365_test_00088582.jpg +Places365_test_00088586.jpg +Places365_test_00088594.jpg +Places365_test_00088603.jpg +Places365_test_00088635.jpg +Places365_test_00088645.jpg +Places365_test_00088652.jpg +Places365_test_00088667.jpg +Places365_test_00088668.jpg +Places365_test_00088695.jpg +Places365_test_00088701.jpg +Places365_test_00088723.jpg +Places365_test_00088737.jpg +Places365_test_00088741.jpg +Places365_test_00088760.jpg +Places365_test_00088795.jpg +Places365_test_00088820.jpg +Places365_test_00088828.jpg +Places365_test_00088831.jpg +Places365_test_00088838.jpg +Places365_test_00088855.jpg +Places365_test_00088881.jpg +Places365_test_00088882.jpg +Places365_test_00088884.jpg +Places365_test_00088890.jpg +Places365_test_00088893.jpg +Places365_test_00088921.jpg +Places365_test_00088925.jpg +Places365_test_00088928.jpg +Places365_test_00088929.jpg +Places365_test_00088933.jpg +Places365_test_00088936.jpg +Places365_test_00088946.jpg +Places365_test_00088954.jpg +Places365_test_00088984.jpg +Places365_test_00088987.jpg +Places365_test_00088997.jpg +Places365_test_00089013.jpg +Places365_test_00089016.jpg +Places365_test_00089048.jpg +Places365_test_00089060.jpg +Places365_test_00089071.jpg +Places365_test_00089079.jpg +Places365_test_00089084.jpg +Places365_test_00089088.jpg +Places365_test_00089089.jpg +Places365_test_00089093.jpg +Places365_test_00089110.jpg +Places365_test_00089130.jpg +Places365_test_00089132.jpg +Places365_test_00089141.jpg +Places365_test_00089142.jpg +Places365_test_00089145.jpg +Places365_test_00089152.jpg +Places365_test_00089156.jpg +Places365_test_00089169.jpg +Places365_test_00089222.jpg +Places365_test_00089231.jpg +Places365_test_00089241.jpg +Places365_test_00089261.jpg +Places365_test_00089294.jpg +Places365_test_00089305.jpg +Places365_test_00089313.jpg +Places365_test_00089339.jpg +Places365_test_00089350.jpg +Places365_test_00089372.jpg +Places365_test_00089383.jpg +Places365_test_00089385.jpg +Places365_test_00089409.jpg +Places365_test_00089411.jpg +Places365_test_00089438.jpg +Places365_test_00089440.jpg +Places365_test_00089442.jpg +Places365_test_00089450.jpg +Places365_test_00089464.jpg +Places365_test_00089477.jpg +Places365_test_00089479.jpg +Places365_test_00089517.jpg +Places365_test_00089518.jpg +Places365_test_00089520.jpg 
+Places365_test_00089541.jpg +Places365_test_00089572.jpg +Places365_test_00089573.jpg +Places365_test_00089574.jpg +Places365_test_00089648.jpg +Places365_test_00089654.jpg +Places365_test_00089661.jpg +Places365_test_00089662.jpg +Places365_test_00089669.jpg +Places365_test_00089687.jpg +Places365_test_00089692.jpg +Places365_test_00089698.jpg +Places365_test_00089718.jpg +Places365_test_00089725.jpg +Places365_test_00089726.jpg +Places365_test_00089736.jpg +Places365_test_00089740.jpg +Places365_test_00089775.jpg +Places365_test_00089785.jpg +Places365_test_00089798.jpg +Places365_test_00089801.jpg +Places365_test_00089828.jpg +Places365_test_00089839.jpg +Places365_test_00089849.jpg +Places365_test_00089851.jpg +Places365_test_00089858.jpg +Places365_test_00089870.jpg +Places365_test_00089878.jpg +Places365_test_00089884.jpg +Places365_test_00089885.jpg +Places365_test_00089914.jpg +Places365_test_00089929.jpg +Places365_test_00089936.jpg +Places365_test_00089959.jpg +Places365_test_00089961.jpg +Places365_test_00089994.jpg +Places365_test_00090001.jpg +Places365_test_00090010.jpg +Places365_test_00090018.jpg +Places365_test_00090020.jpg +Places365_test_00090043.jpg +Places365_test_00090074.jpg +Places365_test_00090075.jpg +Places365_test_00090081.jpg +Places365_test_00090089.jpg +Places365_test_00090094.jpg +Places365_test_00090104.jpg +Places365_test_00090110.jpg +Places365_test_00090129.jpg +Places365_test_00090149.jpg +Places365_test_00090166.jpg +Places365_test_00090173.jpg +Places365_test_00090193.jpg +Places365_test_00090200.jpg +Places365_test_00090206.jpg +Places365_test_00090240.jpg +Places365_test_00090241.jpg +Places365_test_00090244.jpg +Places365_test_00090254.jpg +Places365_test_00090256.jpg +Places365_test_00090258.jpg +Places365_test_00090263.jpg +Places365_test_00090266.jpg +Places365_test_00090285.jpg +Places365_test_00090290.jpg +Places365_test_00090298.jpg +Places365_test_00090299.jpg +Places365_test_00090307.jpg +Places365_test_00090313.jpg +Places365_test_00090316.jpg +Places365_test_00090319.jpg +Places365_test_00090381.jpg +Places365_test_00090389.jpg +Places365_test_00090391.jpg +Places365_test_00090398.jpg +Places365_test_00090400.jpg +Places365_test_00090402.jpg +Places365_test_00090405.jpg +Places365_test_00090413.jpg +Places365_test_00090414.jpg +Places365_test_00090424.jpg +Places365_test_00090449.jpg +Places365_test_00090457.jpg +Places365_test_00090465.jpg +Places365_test_00090476.jpg +Places365_test_00090482.jpg +Places365_test_00090483.jpg +Places365_test_00090489.jpg +Places365_test_00090492.jpg +Places365_test_00090506.jpg +Places365_test_00090521.jpg +Places365_test_00090543.jpg +Places365_test_00090568.jpg +Places365_test_00090640.jpg +Places365_test_00090641.jpg +Places365_test_00090653.jpg +Places365_test_00090659.jpg +Places365_test_00090663.jpg +Places365_test_00090681.jpg +Places365_test_00090694.jpg +Places365_test_00090720.jpg +Places365_test_00090731.jpg +Places365_test_00090734.jpg +Places365_test_00090736.jpg +Places365_test_00090749.jpg +Places365_test_00090750.jpg +Places365_test_00090756.jpg +Places365_test_00090780.jpg +Places365_test_00090798.jpg +Places365_test_00090808.jpg +Places365_test_00090829.jpg +Places365_test_00090836.jpg +Places365_test_00090840.jpg +Places365_test_00090865.jpg +Places365_test_00090888.jpg +Places365_test_00090892.jpg +Places365_test_00090902.jpg +Places365_test_00090911.jpg +Places365_test_00090919.jpg +Places365_test_00090937.jpg +Places365_test_00090941.jpg +Places365_test_00090943.jpg 
+Places365_test_00090968.jpg +Places365_test_00091009.jpg +Places365_test_00091028.jpg +Places365_test_00091046.jpg +Places365_test_00091059.jpg +Places365_test_00091078.jpg +Places365_test_00091083.jpg +Places365_test_00091090.jpg +Places365_test_00091103.jpg +Places365_test_00091111.jpg +Places365_test_00091114.jpg +Places365_test_00091127.jpg +Places365_test_00091138.jpg +Places365_test_00091156.jpg +Places365_test_00091167.jpg +Places365_test_00091186.jpg +Places365_test_00091188.jpg +Places365_test_00091195.jpg +Places365_test_00091235.jpg +Places365_test_00091250.jpg +Places365_test_00091256.jpg +Places365_test_00091264.jpg +Places365_test_00091281.jpg +Places365_test_00091283.jpg +Places365_test_00091289.jpg +Places365_test_00091303.jpg +Places365_test_00091314.jpg +Places365_test_00091331.jpg +Places365_test_00091350.jpg +Places365_test_00091372.jpg +Places365_test_00091373.jpg +Places365_test_00091377.jpg +Places365_test_00091386.jpg +Places365_test_00091402.jpg +Places365_test_00091435.jpg +Places365_test_00091444.jpg +Places365_test_00091481.jpg +Places365_test_00091496.jpg +Places365_test_00091500.jpg +Places365_test_00091507.jpg +Places365_test_00091517.jpg +Places365_test_00091549.jpg +Places365_test_00091550.jpg +Places365_test_00091558.jpg +Places365_test_00091559.jpg +Places365_test_00091567.jpg +Places365_test_00091577.jpg +Places365_test_00091578.jpg +Places365_test_00091606.jpg +Places365_test_00091634.jpg +Places365_test_00091636.jpg +Places365_test_00091641.jpg +Places365_test_00091642.jpg +Places365_test_00091645.jpg +Places365_test_00091652.jpg +Places365_test_00091662.jpg +Places365_test_00091668.jpg +Places365_test_00091675.jpg +Places365_test_00091679.jpg +Places365_test_00091688.jpg +Places365_test_00091692.jpg +Places365_test_00091698.jpg +Places365_test_00091701.jpg +Places365_test_00091702.jpg +Places365_test_00091705.jpg +Places365_test_00091707.jpg +Places365_test_00091729.jpg +Places365_test_00091731.jpg +Places365_test_00091735.jpg +Places365_test_00091740.jpg +Places365_test_00091754.jpg +Places365_test_00091761.jpg +Places365_test_00091769.jpg +Places365_test_00091776.jpg +Places365_test_00091786.jpg +Places365_test_00091794.jpg +Places365_test_00091798.jpg +Places365_test_00091801.jpg +Places365_test_00091816.jpg +Places365_test_00091817.jpg +Places365_test_00091835.jpg +Places365_test_00091840.jpg +Places365_test_00091843.jpg +Places365_test_00091845.jpg +Places365_test_00091871.jpg +Places365_test_00091890.jpg +Places365_test_00091895.jpg +Places365_test_00091900.jpg +Places365_test_00091930.jpg +Places365_test_00091933.jpg +Places365_test_00091936.jpg +Places365_test_00091940.jpg +Places365_test_00091946.jpg +Places365_test_00091977.jpg +Places365_test_00091978.jpg +Places365_test_00091981.jpg +Places365_test_00091988.jpg +Places365_test_00092003.jpg +Places365_test_00092009.jpg +Places365_test_00092034.jpg +Places365_test_00092045.jpg +Places365_test_00092046.jpg +Places365_test_00092051.jpg +Places365_test_00092059.jpg +Places365_test_00092065.jpg +Places365_test_00092116.jpg +Places365_test_00092119.jpg +Places365_test_00092133.jpg +Places365_test_00092141.jpg +Places365_test_00092143.jpg +Places365_test_00092146.jpg +Places365_test_00092151.jpg +Places365_test_00092161.jpg +Places365_test_00092193.jpg +Places365_test_00092203.jpg +Places365_test_00092207.jpg +Places365_test_00092210.jpg +Places365_test_00092212.jpg +Places365_test_00092219.jpg +Places365_test_00092222.jpg +Places365_test_00092233.jpg +Places365_test_00092236.jpg 
+Places365_test_00092237.jpg +Places365_test_00092240.jpg +Places365_test_00092241.jpg +Places365_test_00092250.jpg +Places365_test_00092294.jpg +Places365_test_00092297.jpg +Places365_test_00092306.jpg +Places365_test_00092309.jpg +Places365_test_00092329.jpg +Places365_test_00092334.jpg +Places365_test_00092339.jpg +Places365_test_00092342.jpg +Places365_test_00092349.jpg +Places365_test_00092354.jpg +Places365_test_00092381.jpg +Places365_test_00092384.jpg +Places365_test_00092386.jpg +Places365_test_00092398.jpg +Places365_test_00092403.jpg +Places365_test_00092405.jpg +Places365_test_00092407.jpg +Places365_test_00092412.jpg +Places365_test_00092413.jpg +Places365_test_00092414.jpg +Places365_test_00092428.jpg +Places365_test_00092429.jpg +Places365_test_00092435.jpg +Places365_test_00092438.jpg +Places365_test_00092442.jpg +Places365_test_00092514.jpg +Places365_test_00092517.jpg +Places365_test_00092523.jpg +Places365_test_00092525.jpg +Places365_test_00092530.jpg +Places365_test_00092547.jpg +Places365_test_00092552.jpg +Places365_test_00092570.jpg +Places365_test_00092573.jpg +Places365_test_00092586.jpg +Places365_test_00092590.jpg +Places365_test_00092594.jpg +Places365_test_00092597.jpg +Places365_test_00092598.jpg +Places365_test_00092635.jpg +Places365_test_00092666.jpg +Places365_test_00092670.jpg +Places365_test_00092671.jpg +Places365_test_00092693.jpg +Places365_test_00092703.jpg +Places365_test_00092706.jpg +Places365_test_00092716.jpg +Places365_test_00092745.jpg +Places365_test_00092750.jpg +Places365_test_00092757.jpg +Places365_test_00092761.jpg +Places365_test_00092769.jpg +Places365_test_00092772.jpg +Places365_test_00092774.jpg +Places365_test_00092779.jpg +Places365_test_00092809.jpg +Places365_test_00092816.jpg +Places365_test_00092822.jpg +Places365_test_00092830.jpg +Places365_test_00092838.jpg +Places365_test_00092852.jpg +Places365_test_00092859.jpg +Places365_test_00092868.jpg +Places365_test_00092880.jpg +Places365_test_00092888.jpg +Places365_test_00092903.jpg +Places365_test_00092934.jpg +Places365_test_00092947.jpg +Places365_test_00092949.jpg +Places365_test_00092973.jpg +Places365_test_00092987.jpg +Places365_test_00093011.jpg +Places365_test_00093014.jpg +Places365_test_00093032.jpg +Places365_test_00093037.jpg +Places365_test_00093039.jpg +Places365_test_00093044.jpg +Places365_test_00093050.jpg +Places365_test_00093064.jpg +Places365_test_00093083.jpg +Places365_test_00093088.jpg +Places365_test_00093091.jpg +Places365_test_00093100.jpg +Places365_test_00093141.jpg +Places365_test_00093153.jpg +Places365_test_00093159.jpg +Places365_test_00093160.jpg +Places365_test_00093166.jpg +Places365_test_00093173.jpg +Places365_test_00093174.jpg +Places365_test_00093176.jpg +Places365_test_00093202.jpg +Places365_test_00093243.jpg +Places365_test_00093253.jpg +Places365_test_00093271.jpg +Places365_test_00093277.jpg +Places365_test_00093278.jpg +Places365_test_00093282.jpg +Places365_test_00093285.jpg +Places365_test_00093296.jpg +Places365_test_00093305.jpg +Places365_test_00093310.jpg +Places365_test_00093313.jpg +Places365_test_00093323.jpg +Places365_test_00093335.jpg +Places365_test_00093350.jpg +Places365_test_00093354.jpg +Places365_test_00093366.jpg +Places365_test_00093369.jpg +Places365_test_00093387.jpg +Places365_test_00093392.jpg +Places365_test_00093396.jpg +Places365_test_00093416.jpg +Places365_test_00093437.jpg +Places365_test_00093440.jpg +Places365_test_00093451.jpg +Places365_test_00093472.jpg +Places365_test_00093483.jpg 
+Places365_test_00093485.jpg +Places365_test_00093497.jpg +Places365_test_00093498.jpg +Places365_test_00093502.jpg +Places365_test_00093505.jpg +Places365_test_00093513.jpg +Places365_test_00093518.jpg +Places365_test_00093527.jpg +Places365_test_00093531.jpg +Places365_test_00093548.jpg +Places365_test_00093560.jpg +Places365_test_00093570.jpg +Places365_test_00093571.jpg +Places365_test_00093577.jpg +Places365_test_00093583.jpg +Places365_test_00093599.jpg +Places365_test_00093629.jpg +Places365_test_00093644.jpg +Places365_test_00093657.jpg +Places365_test_00093661.jpg +Places365_test_00093664.jpg +Places365_test_00093667.jpg +Places365_test_00093692.jpg +Places365_test_00093693.jpg +Places365_test_00093696.jpg +Places365_test_00093751.jpg +Places365_test_00093758.jpg +Places365_test_00093762.jpg +Places365_test_00093786.jpg +Places365_test_00093792.jpg +Places365_test_00093796.jpg +Places365_test_00093799.jpg +Places365_test_00093815.jpg +Places365_test_00093853.jpg +Places365_test_00093859.jpg +Places365_test_00093875.jpg +Places365_test_00093889.jpg +Places365_test_00093900.jpg +Places365_test_00093903.jpg +Places365_test_00093927.jpg +Places365_test_00093938.jpg +Places365_test_00093957.jpg +Places365_test_00093958.jpg +Places365_test_00093969.jpg +Places365_test_00093975.jpg +Places365_test_00093980.jpg +Places365_test_00093992.jpg +Places365_test_00093993.jpg +Places365_test_00094031.jpg +Places365_test_00094040.jpg +Places365_test_00094049.jpg +Places365_test_00094052.jpg +Places365_test_00094065.jpg +Places365_test_00094066.jpg +Places365_test_00094074.jpg +Places365_test_00094080.jpg +Places365_test_00094085.jpg +Places365_test_00094090.jpg +Places365_test_00094099.jpg +Places365_test_00094110.jpg +Places365_test_00094119.jpg +Places365_test_00094127.jpg +Places365_test_00094134.jpg +Places365_test_00094153.jpg +Places365_test_00094159.jpg +Places365_test_00094161.jpg +Places365_test_00094162.jpg +Places365_test_00094180.jpg +Places365_test_00094187.jpg +Places365_test_00094193.jpg +Places365_test_00094196.jpg +Places365_test_00094206.jpg +Places365_test_00094207.jpg +Places365_test_00094218.jpg +Places365_test_00094244.jpg +Places365_test_00094295.jpg +Places365_test_00094318.jpg +Places365_test_00094319.jpg +Places365_test_00094321.jpg +Places365_test_00094336.jpg +Places365_test_00094340.jpg +Places365_test_00094341.jpg +Places365_test_00094342.jpg +Places365_test_00094350.jpg +Places365_test_00094408.jpg +Places365_test_00094419.jpg +Places365_test_00094425.jpg +Places365_test_00094426.jpg +Places365_test_00094446.jpg +Places365_test_00094448.jpg +Places365_test_00094460.jpg +Places365_test_00094462.jpg +Places365_test_00094495.jpg +Places365_test_00094496.jpg +Places365_test_00094511.jpg +Places365_test_00094521.jpg +Places365_test_00094523.jpg +Places365_test_00094532.jpg +Places365_test_00094534.jpg +Places365_test_00094548.jpg +Places365_test_00094566.jpg +Places365_test_00094573.jpg +Places365_test_00094576.jpg +Places365_test_00094589.jpg +Places365_test_00094592.jpg +Places365_test_00094593.jpg +Places365_test_00094595.jpg +Places365_test_00094597.jpg +Places365_test_00094599.jpg +Places365_test_00094602.jpg +Places365_test_00094613.jpg +Places365_test_00094616.jpg +Places365_test_00094620.jpg +Places365_test_00094630.jpg +Places365_test_00094634.jpg +Places365_test_00094636.jpg +Places365_test_00094661.jpg +Places365_test_00094675.jpg +Places365_test_00094698.jpg +Places365_test_00094700.jpg +Places365_test_00094701.jpg +Places365_test_00094714.jpg 
+Places365_test_00094723.jpg +Places365_test_00094746.jpg +Places365_test_00094789.jpg +Places365_test_00094791.jpg +Places365_test_00094794.jpg +Places365_test_00094799.jpg +Places365_test_00094807.jpg +Places365_test_00094814.jpg +Places365_test_00094823.jpg +Places365_test_00094830.jpg +Places365_test_00094837.jpg +Places365_test_00094847.jpg +Places365_test_00094863.jpg +Places365_test_00094869.jpg +Places365_test_00094881.jpg +Places365_test_00094892.jpg +Places365_test_00094906.jpg +Places365_test_00094951.jpg +Places365_test_00094970.jpg +Places365_test_00095015.jpg +Places365_test_00095018.jpg +Places365_test_00095022.jpg +Places365_test_00095027.jpg +Places365_test_00095056.jpg +Places365_test_00095058.jpg +Places365_test_00095059.jpg +Places365_test_00095073.jpg +Places365_test_00095077.jpg +Places365_test_00095089.jpg +Places365_test_00095112.jpg +Places365_test_00095124.jpg +Places365_test_00095137.jpg +Places365_test_00095141.jpg +Places365_test_00095142.jpg +Places365_test_00095148.jpg +Places365_test_00095149.jpg +Places365_test_00095161.jpg +Places365_test_00095162.jpg +Places365_test_00095176.jpg +Places365_test_00095192.jpg +Places365_test_00095194.jpg +Places365_test_00095210.jpg +Places365_test_00095211.jpg +Places365_test_00095213.jpg +Places365_test_00095217.jpg +Places365_test_00095233.jpg +Places365_test_00095278.jpg +Places365_test_00095289.jpg +Places365_test_00095325.jpg +Places365_test_00095326.jpg +Places365_test_00095344.jpg +Places365_test_00095358.jpg +Places365_test_00095362.jpg +Places365_test_00095391.jpg +Places365_test_00095399.jpg +Places365_test_00095428.jpg +Places365_test_00095449.jpg +Places365_test_00095450.jpg +Places365_test_00095456.jpg +Places365_test_00095466.jpg +Places365_test_00095498.jpg +Places365_test_00095558.jpg +Places365_test_00095559.jpg +Places365_test_00095561.jpg +Places365_test_00095579.jpg +Places365_test_00095584.jpg +Places365_test_00095591.jpg +Places365_test_00095597.jpg +Places365_test_00095609.jpg +Places365_test_00095651.jpg +Places365_test_00095657.jpg +Places365_test_00095691.jpg +Places365_test_00095697.jpg +Places365_test_00095716.jpg +Places365_test_00095740.jpg +Places365_test_00095742.jpg +Places365_test_00095751.jpg +Places365_test_00095756.jpg +Places365_test_00095757.jpg +Places365_test_00095758.jpg +Places365_test_00095780.jpg +Places365_test_00095788.jpg +Places365_test_00095789.jpg +Places365_test_00095791.jpg +Places365_test_00095796.jpg +Places365_test_00095823.jpg +Places365_test_00095846.jpg +Places365_test_00095858.jpg +Places365_test_00095890.jpg +Places365_test_00095891.jpg +Places365_test_00095896.jpg +Places365_test_00095903.jpg +Places365_test_00095905.jpg +Places365_test_00095911.jpg +Places365_test_00095924.jpg +Places365_test_00095942.jpg +Places365_test_00095951.jpg +Places365_test_00095953.jpg +Places365_test_00095954.jpg +Places365_test_00095980.jpg +Places365_test_00095984.jpg +Places365_test_00095989.jpg +Places365_test_00096010.jpg +Places365_test_00096013.jpg +Places365_test_00096014.jpg +Places365_test_00096021.jpg +Places365_test_00096043.jpg +Places365_test_00096052.jpg +Places365_test_00096080.jpg +Places365_test_00096085.jpg +Places365_test_00096105.jpg +Places365_test_00096106.jpg +Places365_test_00096110.jpg +Places365_test_00096116.jpg +Places365_test_00096118.jpg +Places365_test_00096124.jpg +Places365_test_00096128.jpg +Places365_test_00096129.jpg +Places365_test_00096145.jpg +Places365_test_00096154.jpg +Places365_test_00096155.jpg +Places365_test_00096167.jpg 
+Places365_test_00096188.jpg +Places365_test_00096213.jpg +Places365_test_00096218.jpg +Places365_test_00096225.jpg +Places365_test_00096226.jpg +Places365_test_00096227.jpg +Places365_test_00096232.jpg +Places365_test_00096242.jpg +Places365_test_00096246.jpg +Places365_test_00096251.jpg +Places365_test_00096262.jpg +Places365_test_00096280.jpg +Places365_test_00096285.jpg +Places365_test_00096286.jpg +Places365_test_00096291.jpg +Places365_test_00096298.jpg +Places365_test_00096315.jpg +Places365_test_00096333.jpg +Places365_test_00096341.jpg +Places365_test_00096344.jpg +Places365_test_00096371.jpg +Places365_test_00096372.jpg +Places365_test_00096373.jpg +Places365_test_00096382.jpg +Places365_test_00096411.jpg +Places365_test_00096421.jpg +Places365_test_00096423.jpg +Places365_test_00096432.jpg +Places365_test_00096440.jpg +Places365_test_00096452.jpg +Places365_test_00096454.jpg +Places365_test_00096476.jpg +Places365_test_00096477.jpg +Places365_test_00096488.jpg +Places365_test_00096513.jpg +Places365_test_00096544.jpg +Places365_test_00096573.jpg +Places365_test_00096578.jpg +Places365_test_00096595.jpg +Places365_test_00096607.jpg +Places365_test_00096622.jpg +Places365_test_00096623.jpg +Places365_test_00096624.jpg +Places365_test_00096633.jpg +Places365_test_00096652.jpg +Places365_test_00096659.jpg +Places365_test_00096662.jpg +Places365_test_00096664.jpg +Places365_test_00096693.jpg +Places365_test_00096708.jpg +Places365_test_00096727.jpg +Places365_test_00096735.jpg +Places365_test_00096740.jpg +Places365_test_00096763.jpg +Places365_test_00096766.jpg +Places365_test_00096793.jpg +Places365_test_00096798.jpg +Places365_test_00096801.jpg +Places365_test_00096852.jpg +Places365_test_00096855.jpg +Places365_test_00096860.jpg +Places365_test_00096862.jpg +Places365_test_00096869.jpg +Places365_test_00096877.jpg +Places365_test_00096911.jpg +Places365_test_00096922.jpg +Places365_test_00096923.jpg +Places365_test_00096925.jpg +Places365_test_00096953.jpg +Places365_test_00096963.jpg +Places365_test_00096982.jpg +Places365_test_00096985.jpg +Places365_test_00096989.jpg +Places365_test_00097009.jpg +Places365_test_00097011.jpg +Places365_test_00097019.jpg +Places365_test_00097030.jpg +Places365_test_00097063.jpg +Places365_test_00097070.jpg +Places365_test_00097074.jpg +Places365_test_00097075.jpg +Places365_test_00097093.jpg +Places365_test_00097110.jpg +Places365_test_00097121.jpg +Places365_test_00097123.jpg +Places365_test_00097159.jpg +Places365_test_00097168.jpg +Places365_test_00097170.jpg +Places365_test_00097176.jpg +Places365_test_00097179.jpg +Places365_test_00097182.jpg +Places365_test_00097193.jpg +Places365_test_00097196.jpg +Places365_test_00097220.jpg +Places365_test_00097231.jpg +Places365_test_00097233.jpg +Places365_test_00097260.jpg +Places365_test_00097273.jpg +Places365_test_00097278.jpg +Places365_test_00097287.jpg +Places365_test_00097298.jpg +Places365_test_00097316.jpg +Places365_test_00097319.jpg +Places365_test_00097336.jpg +Places365_test_00097355.jpg +Places365_test_00097365.jpg +Places365_test_00097375.jpg +Places365_test_00097402.jpg +Places365_test_00097405.jpg +Places365_test_00097414.jpg +Places365_test_00097416.jpg +Places365_test_00097422.jpg +Places365_test_00097423.jpg +Places365_test_00097426.jpg +Places365_test_00097447.jpg +Places365_test_00097451.jpg +Places365_test_00097453.jpg +Places365_test_00097460.jpg +Places365_test_00097465.jpg +Places365_test_00097469.jpg +Places365_test_00097480.jpg +Places365_test_00097483.jpg 
+Places365_test_00097485.jpg +Places365_test_00097492.jpg +Places365_test_00097522.jpg +Places365_test_00097532.jpg +Places365_test_00097542.jpg +Places365_test_00097543.jpg +Places365_test_00097554.jpg +Places365_test_00097561.jpg +Places365_test_00097563.jpg +Places365_test_00097570.jpg +Places365_test_00097586.jpg +Places365_test_00097633.jpg +Places365_test_00097639.jpg +Places365_test_00097649.jpg +Places365_test_00097657.jpg +Places365_test_00097660.jpg +Places365_test_00097664.jpg +Places365_test_00097667.jpg +Places365_test_00097697.jpg +Places365_test_00097710.jpg +Places365_test_00097711.jpg +Places365_test_00097715.jpg +Places365_test_00097774.jpg +Places365_test_00097780.jpg +Places365_test_00097781.jpg +Places365_test_00097800.jpg +Places365_test_00097810.jpg +Places365_test_00097812.jpg +Places365_test_00097831.jpg +Places365_test_00097835.jpg +Places365_test_00097856.jpg +Places365_test_00097859.jpg +Places365_test_00097867.jpg +Places365_test_00097868.jpg +Places365_test_00097871.jpg +Places365_test_00097875.jpg +Places365_test_00097876.jpg +Places365_test_00097891.jpg +Places365_test_00097915.jpg +Places365_test_00097923.jpg +Places365_test_00097929.jpg +Places365_test_00097944.jpg +Places365_test_00097945.jpg +Places365_test_00097958.jpg +Places365_test_00097964.jpg +Places365_test_00098062.jpg +Places365_test_00098069.jpg +Places365_test_00098078.jpg +Places365_test_00098115.jpg +Places365_test_00098138.jpg +Places365_test_00098147.jpg +Places365_test_00098156.jpg +Places365_test_00098162.jpg +Places365_test_00098163.jpg +Places365_test_00098177.jpg +Places365_test_00098183.jpg +Places365_test_00098184.jpg +Places365_test_00098205.jpg +Places365_test_00098217.jpg +Places365_test_00098221.jpg +Places365_test_00098233.jpg +Places365_test_00098234.jpg +Places365_test_00098241.jpg +Places365_test_00098244.jpg +Places365_test_00098248.jpg +Places365_test_00098256.jpg +Places365_test_00098296.jpg +Places365_test_00098302.jpg +Places365_test_00098316.jpg +Places365_test_00098337.jpg +Places365_test_00098352.jpg +Places365_test_00098353.jpg +Places365_test_00098356.jpg +Places365_test_00098360.jpg +Places365_test_00098363.jpg +Places365_test_00098392.jpg +Places365_test_00098397.jpg +Places365_test_00098415.jpg +Places365_test_00098418.jpg +Places365_test_00098433.jpg +Places365_test_00098441.jpg +Places365_test_00098443.jpg +Places365_test_00098453.jpg +Places365_test_00098472.jpg +Places365_test_00098488.jpg +Places365_test_00098504.jpg +Places365_test_00098531.jpg +Places365_test_00098534.jpg +Places365_test_00098544.jpg +Places365_test_00098555.jpg +Places365_test_00098570.jpg +Places365_test_00098581.jpg +Places365_test_00098586.jpg +Places365_test_00098590.jpg +Places365_test_00098591.jpg +Places365_test_00098605.jpg +Places365_test_00098618.jpg +Places365_test_00098620.jpg +Places365_test_00098659.jpg +Places365_test_00098667.jpg +Places365_test_00098668.jpg +Places365_test_00098679.jpg +Places365_test_00098685.jpg +Places365_test_00098688.jpg +Places365_test_00098692.jpg +Places365_test_00098720.jpg +Places365_test_00098727.jpg +Places365_test_00098770.jpg +Places365_test_00098790.jpg +Places365_test_00098806.jpg +Places365_test_00098824.jpg +Places365_test_00098832.jpg +Places365_test_00098853.jpg +Places365_test_00098878.jpg +Places365_test_00098883.jpg +Places365_test_00098885.jpg +Places365_test_00098894.jpg +Places365_test_00098899.jpg +Places365_test_00098901.jpg +Places365_test_00098905.jpg +Places365_test_00098925.jpg +Places365_test_00098935.jpg 
+Places365_test_00098937.jpg +Places365_test_00098943.jpg +Places365_test_00098947.jpg +Places365_test_00098948.jpg +Places365_test_00098965.jpg +Places365_test_00098978.jpg +Places365_test_00098986.jpg +Places365_test_00098998.jpg +Places365_test_00099003.jpg +Places365_test_00099004.jpg +Places365_test_00099007.jpg +Places365_test_00099016.jpg +Places365_test_00099022.jpg +Places365_test_00099025.jpg +Places365_test_00099035.jpg +Places365_test_00099038.jpg +Places365_test_00099050.jpg +Places365_test_00099053.jpg +Places365_test_00099057.jpg +Places365_test_00099062.jpg +Places365_test_00099087.jpg +Places365_test_00099094.jpg +Places365_test_00099115.jpg +Places365_test_00099116.jpg +Places365_test_00099127.jpg +Places365_test_00099138.jpg +Places365_test_00099141.jpg +Places365_test_00099154.jpg +Places365_test_00099155.jpg +Places365_test_00099157.jpg +Places365_test_00099173.jpg +Places365_test_00099178.jpg +Places365_test_00099181.jpg +Places365_test_00099184.jpg +Places365_test_00099225.jpg +Places365_test_00099247.jpg +Places365_test_00099249.jpg +Places365_test_00099269.jpg +Places365_test_00099281.jpg +Places365_test_00099297.jpg +Places365_test_00099301.jpg +Places365_test_00099313.jpg +Places365_test_00099327.jpg +Places365_test_00099329.jpg +Places365_test_00099344.jpg +Places365_test_00099353.jpg +Places365_test_00099380.jpg +Places365_test_00099384.jpg +Places365_test_00099395.jpg +Places365_test_00099415.jpg +Places365_test_00099419.jpg +Places365_test_00099425.jpg +Places365_test_00099430.jpg +Places365_test_00099443.jpg +Places365_test_00099460.jpg +Places365_test_00099464.jpg +Places365_test_00099465.jpg +Places365_test_00099468.jpg +Places365_test_00099486.jpg +Places365_test_00099492.jpg +Places365_test_00099498.jpg +Places365_test_00099499.jpg +Places365_test_00099507.jpg +Places365_test_00099520.jpg +Places365_test_00099533.jpg +Places365_test_00099560.jpg +Places365_test_00099568.jpg +Places365_test_00099574.jpg +Places365_test_00099590.jpg +Places365_test_00099593.jpg +Places365_test_00099595.jpg +Places365_test_00099614.jpg +Places365_test_00099658.jpg +Places365_test_00099662.jpg +Places365_test_00099679.jpg +Places365_test_00099683.jpg +Places365_test_00099690.jpg +Places365_test_00099725.jpg +Places365_test_00099727.jpg +Places365_test_00099732.jpg +Places365_test_00099754.jpg +Places365_test_00099766.jpg +Places365_test_00099767.jpg +Places365_test_00099768.jpg +Places365_test_00099776.jpg +Places365_test_00099789.jpg +Places365_test_00099792.jpg +Places365_test_00099799.jpg +Places365_test_00099806.jpg +Places365_test_00099810.jpg +Places365_test_00099814.jpg +Places365_test_00099830.jpg +Places365_test_00099843.jpg +Places365_test_00099844.jpg +Places365_test_00099847.jpg +Places365_test_00099864.jpg +Places365_test_00099867.jpg +Places365_test_00099871.jpg +Places365_test_00099875.jpg +Places365_test_00099876.jpg +Places365_test_00099896.jpg +Places365_test_00099901.jpg +Places365_test_00099904.jpg +Places365_test_00099910.jpg +Places365_test_00099915.jpg +Places365_test_00099933.jpg +Places365_test_00099977.jpg +Places365_test_00099983.jpg +Places365_test_00100002.jpg +Places365_test_00100017.jpg +Places365_test_00100027.jpg +Places365_test_00100034.jpg +Places365_test_00100039.jpg +Places365_test_00100047.jpg +Places365_test_00100076.jpg +Places365_test_00100097.jpg +Places365_test_00100098.jpg +Places365_test_00100119.jpg +Places365_test_00100138.jpg +Places365_test_00100154.jpg +Places365_test_00100167.jpg +Places365_test_00100188.jpg 
+Places365_test_00100203.jpg +Places365_test_00100234.jpg +Places365_test_00100246.jpg +Places365_test_00100253.jpg +Places365_test_00100254.jpg +Places365_test_00100279.jpg +Places365_test_00100282.jpg +Places365_test_00100290.jpg +Places365_test_00100292.jpg +Places365_test_00100311.jpg +Places365_test_00100312.jpg +Places365_test_00100334.jpg +Places365_test_00100338.jpg +Places365_test_00100364.jpg +Places365_test_00100375.jpg +Places365_test_00100386.jpg +Places365_test_00100406.jpg +Places365_test_00100421.jpg +Places365_test_00100426.jpg +Places365_test_00100428.jpg +Places365_test_00100436.jpg +Places365_test_00100443.jpg +Places365_test_00100445.jpg +Places365_test_00100447.jpg +Places365_test_00100453.jpg +Places365_test_00100455.jpg +Places365_test_00100462.jpg +Places365_test_00100471.jpg +Places365_test_00100493.jpg +Places365_test_00100498.jpg +Places365_test_00100501.jpg +Places365_test_00100508.jpg +Places365_test_00100516.jpg +Places365_test_00100517.jpg +Places365_test_00100542.jpg +Places365_test_00100544.jpg +Places365_test_00100573.jpg +Places365_test_00100586.jpg +Places365_test_00100587.jpg +Places365_test_00100599.jpg +Places365_test_00100603.jpg +Places365_test_00100609.jpg +Places365_test_00100634.jpg +Places365_test_00100646.jpg +Places365_test_00100657.jpg +Places365_test_00100666.jpg +Places365_test_00100670.jpg +Places365_test_00100678.jpg +Places365_test_00100684.jpg +Places365_test_00100687.jpg +Places365_test_00100704.jpg +Places365_test_00100713.jpg +Places365_test_00100715.jpg +Places365_test_00100718.jpg +Places365_test_00100722.jpg +Places365_test_00100758.jpg +Places365_test_00100760.jpg +Places365_test_00100763.jpg +Places365_test_00100768.jpg +Places365_test_00100777.jpg +Places365_test_00100784.jpg +Places365_test_00100787.jpg +Places365_test_00100794.jpg +Places365_test_00100798.jpg +Places365_test_00100817.jpg +Places365_test_00100819.jpg +Places365_test_00100824.jpg +Places365_test_00100825.jpg +Places365_test_00100850.jpg +Places365_test_00100853.jpg +Places365_test_00100858.jpg +Places365_test_00100860.jpg +Places365_test_00100870.jpg +Places365_test_00100882.jpg +Places365_test_00100884.jpg +Places365_test_00100893.jpg +Places365_test_00100894.jpg +Places365_test_00100907.jpg +Places365_test_00100921.jpg +Places365_test_00100932.jpg +Places365_test_00100953.jpg +Places365_test_00100961.jpg +Places365_test_00100963.jpg +Places365_test_00100975.jpg +Places365_test_00100978.jpg +Places365_test_00101026.jpg +Places365_test_00101033.jpg +Places365_test_00101045.jpg +Places365_test_00101052.jpg +Places365_test_00101072.jpg +Places365_test_00101075.jpg +Places365_test_00101079.jpg +Places365_test_00101110.jpg +Places365_test_00101115.jpg +Places365_test_00101117.jpg +Places365_test_00101124.jpg +Places365_test_00101130.jpg +Places365_test_00101143.jpg +Places365_test_00101152.jpg +Places365_test_00101170.jpg +Places365_test_00101190.jpg +Places365_test_00101205.jpg +Places365_test_00101206.jpg +Places365_test_00101219.jpg +Places365_test_00101223.jpg +Places365_test_00101224.jpg +Places365_test_00101230.jpg +Places365_test_00101239.jpg +Places365_test_00101240.jpg +Places365_test_00101247.jpg +Places365_test_00101269.jpg +Places365_test_00101274.jpg +Places365_test_00101276.jpg +Places365_test_00101277.jpg +Places365_test_00101281.jpg +Places365_test_00101284.jpg +Places365_test_00101300.jpg +Places365_test_00101313.jpg +Places365_test_00101320.jpg +Places365_test_00101322.jpg +Places365_test_00101344.jpg +Places365_test_00101355.jpg 
+Places365_test_00101361.jpg +Places365_test_00101401.jpg +Places365_test_00101411.jpg +Places365_test_00101413.jpg +Places365_test_00101418.jpg +Places365_test_00101423.jpg +Places365_test_00101442.jpg +Places365_test_00101444.jpg +Places365_test_00101450.jpg +Places365_test_00101453.jpg +Places365_test_00101476.jpg +Places365_test_00101480.jpg +Places365_test_00101483.jpg +Places365_test_00101497.jpg +Places365_test_00101502.jpg +Places365_test_00101506.jpg +Places365_test_00101513.jpg +Places365_test_00101534.jpg +Places365_test_00101545.jpg +Places365_test_00101561.jpg +Places365_test_00101566.jpg +Places365_test_00101601.jpg +Places365_test_00101606.jpg +Places365_test_00101628.jpg +Places365_test_00101630.jpg +Places365_test_00101637.jpg +Places365_test_00101645.jpg +Places365_test_00101659.jpg +Places365_test_00101668.jpg +Places365_test_00101673.jpg +Places365_test_00101686.jpg +Places365_test_00101721.jpg +Places365_test_00101726.jpg +Places365_test_00101729.jpg +Places365_test_00101744.jpg +Places365_test_00101765.jpg +Places365_test_00101769.jpg +Places365_test_00101781.jpg +Places365_test_00101806.jpg +Places365_test_00101808.jpg +Places365_test_00101830.jpg +Places365_test_00101834.jpg +Places365_test_00101848.jpg +Places365_test_00101868.jpg +Places365_test_00101885.jpg +Places365_test_00101893.jpg +Places365_test_00101903.jpg +Places365_test_00101910.jpg +Places365_test_00101914.jpg +Places365_test_00101919.jpg +Places365_test_00101929.jpg +Places365_test_00101944.jpg +Places365_test_00101948.jpg +Places365_test_00101955.jpg +Places365_test_00101956.jpg +Places365_test_00101964.jpg +Places365_test_00101972.jpg +Places365_test_00101993.jpg +Places365_test_00101996.jpg +Places365_test_00102012.jpg +Places365_test_00102020.jpg +Places365_test_00102031.jpg +Places365_test_00102045.jpg +Places365_test_00102057.jpg +Places365_test_00102059.jpg +Places365_test_00102061.jpg +Places365_test_00102082.jpg +Places365_test_00102091.jpg +Places365_test_00102094.jpg +Places365_test_00102103.jpg +Places365_test_00102111.jpg +Places365_test_00102118.jpg +Places365_test_00102122.jpg +Places365_test_00102133.jpg +Places365_test_00102148.jpg +Places365_test_00102153.jpg +Places365_test_00102172.jpg +Places365_test_00102179.jpg +Places365_test_00102194.jpg +Places365_test_00102211.jpg +Places365_test_00102212.jpg +Places365_test_00102217.jpg +Places365_test_00102223.jpg +Places365_test_00102244.jpg +Places365_test_00102246.jpg +Places365_test_00102254.jpg +Places365_test_00102256.jpg +Places365_test_00102260.jpg +Places365_test_00102266.jpg +Places365_test_00102276.jpg +Places365_test_00102293.jpg +Places365_test_00102295.jpg +Places365_test_00102301.jpg +Places365_test_00102303.jpg +Places365_test_00102308.jpg +Places365_test_00102321.jpg +Places365_test_00102332.jpg +Places365_test_00102354.jpg +Places365_test_00102361.jpg +Places365_test_00102396.jpg +Places365_test_00102401.jpg +Places365_test_00102409.jpg +Places365_test_00102417.jpg +Places365_test_00102426.jpg +Places365_test_00102443.jpg +Places365_test_00102451.jpg +Places365_test_00102456.jpg +Places365_test_00102457.jpg +Places365_test_00102461.jpg +Places365_test_00102490.jpg +Places365_test_00102502.jpg +Places365_test_00102503.jpg +Places365_test_00102528.jpg +Places365_test_00102567.jpg +Places365_test_00102575.jpg +Places365_test_00102589.jpg +Places365_test_00102594.jpg +Places365_test_00102595.jpg +Places365_test_00102597.jpg +Places365_test_00102599.jpg +Places365_test_00102610.jpg +Places365_test_00102618.jpg 
+Places365_test_00102633.jpg +Places365_test_00102656.jpg +Places365_test_00102670.jpg +Places365_test_00102673.jpg +Places365_test_00102685.jpg +Places365_test_00102693.jpg +Places365_test_00102703.jpg +Places365_test_00102707.jpg +Places365_test_00102727.jpg +Places365_test_00102733.jpg +Places365_test_00102735.jpg +Places365_test_00102739.jpg +Places365_test_00102745.jpg +Places365_test_00102762.jpg +Places365_test_00102782.jpg +Places365_test_00102785.jpg +Places365_test_00102795.jpg +Places365_test_00102796.jpg +Places365_test_00102811.jpg +Places365_test_00102864.jpg +Places365_test_00102869.jpg +Places365_test_00102881.jpg +Places365_test_00102937.jpg +Places365_test_00102943.jpg +Places365_test_00102999.jpg +Places365_test_00103002.jpg +Places365_test_00103003.jpg +Places365_test_00103008.jpg +Places365_test_00103010.jpg +Places365_test_00103023.jpg +Places365_test_00103037.jpg +Places365_test_00103057.jpg +Places365_test_00103061.jpg +Places365_test_00103069.jpg +Places365_test_00103074.jpg +Places365_test_00103077.jpg +Places365_test_00103094.jpg +Places365_test_00103099.jpg +Places365_test_00103116.jpg +Places365_test_00103122.jpg +Places365_test_00103130.jpg +Places365_test_00103136.jpg +Places365_test_00103149.jpg +Places365_test_00103153.jpg +Places365_test_00103177.jpg +Places365_test_00103200.jpg +Places365_test_00103214.jpg +Places365_test_00103224.jpg +Places365_test_00103228.jpg +Places365_test_00103231.jpg +Places365_test_00103238.jpg +Places365_test_00103242.jpg +Places365_test_00103250.jpg +Places365_test_00103281.jpg +Places365_test_00103284.jpg +Places365_test_00103287.jpg +Places365_test_00103301.jpg +Places365_test_00103305.jpg +Places365_test_00103317.jpg +Places365_test_00103323.jpg +Places365_test_00103327.jpg +Places365_test_00103350.jpg +Places365_test_00103353.jpg +Places365_test_00103369.jpg +Places365_test_00103383.jpg +Places365_test_00103386.jpg +Places365_test_00103387.jpg +Places365_test_00103408.jpg +Places365_test_00103418.jpg +Places365_test_00103434.jpg +Places365_test_00103442.jpg +Places365_test_00103459.jpg +Places365_test_00103465.jpg +Places365_test_00103513.jpg +Places365_test_00103525.jpg +Places365_test_00103528.jpg +Places365_test_00103572.jpg +Places365_test_00103575.jpg +Places365_test_00103589.jpg +Places365_test_00103598.jpg +Places365_test_00103600.jpg +Places365_test_00103615.jpg +Places365_test_00103618.jpg +Places365_test_00103634.jpg +Places365_test_00103638.jpg +Places365_test_00103659.jpg +Places365_test_00103664.jpg +Places365_test_00103667.jpg +Places365_test_00103695.jpg +Places365_test_00103698.jpg +Places365_test_00103747.jpg +Places365_test_00103780.jpg +Places365_test_00103781.jpg +Places365_test_00103785.jpg +Places365_test_00103797.jpg +Places365_test_00103801.jpg +Places365_test_00103856.jpg +Places365_test_00103876.jpg +Places365_test_00103886.jpg +Places365_test_00103891.jpg +Places365_test_00103924.jpg +Places365_test_00103937.jpg +Places365_test_00103938.jpg +Places365_test_00103953.jpg +Places365_test_00103981.jpg +Places365_test_00103991.jpg +Places365_test_00103993.jpg +Places365_test_00104061.jpg +Places365_test_00104067.jpg +Places365_test_00104080.jpg +Places365_test_00104083.jpg +Places365_test_00104086.jpg +Places365_test_00104089.jpg +Places365_test_00104113.jpg +Places365_test_00104136.jpg +Places365_test_00104142.jpg +Places365_test_00104147.jpg +Places365_test_00104151.jpg +Places365_test_00104152.jpg +Places365_test_00104155.jpg +Places365_test_00104156.jpg +Places365_test_00104157.jpg 
+Places365_test_00104159.jpg +Places365_test_00104162.jpg +Places365_test_00104169.jpg +Places365_test_00104175.jpg +Places365_test_00104181.jpg +Places365_test_00104186.jpg +Places365_test_00104193.jpg +Places365_test_00104195.jpg +Places365_test_00104202.jpg +Places365_test_00104212.jpg +Places365_test_00104218.jpg +Places365_test_00104225.jpg +Places365_test_00104261.jpg +Places365_test_00104265.jpg +Places365_test_00104268.jpg +Places365_test_00104274.jpg +Places365_test_00104275.jpg +Places365_test_00104313.jpg +Places365_test_00104324.jpg +Places365_test_00104326.jpg +Places365_test_00104345.jpg +Places365_test_00104349.jpg +Places365_test_00104361.jpg +Places365_test_00104366.jpg +Places365_test_00104407.jpg +Places365_test_00104426.jpg +Places365_test_00104428.jpg +Places365_test_00104454.jpg +Places365_test_00104456.jpg +Places365_test_00104468.jpg +Places365_test_00104472.jpg +Places365_test_00104481.jpg +Places365_test_00104489.jpg +Places365_test_00104512.jpg +Places365_test_00104515.jpg +Places365_test_00104523.jpg +Places365_test_00104571.jpg +Places365_test_00104582.jpg +Places365_test_00104583.jpg +Places365_test_00104594.jpg +Places365_test_00104604.jpg +Places365_test_00104610.jpg +Places365_test_00104611.jpg +Places365_test_00104638.jpg +Places365_test_00104640.jpg +Places365_test_00104641.jpg +Places365_test_00104656.jpg +Places365_test_00104677.jpg +Places365_test_00104687.jpg +Places365_test_00104693.jpg +Places365_test_00104712.jpg +Places365_test_00104727.jpg +Places365_test_00104734.jpg +Places365_test_00104737.jpg +Places365_test_00104740.jpg +Places365_test_00104744.jpg +Places365_test_00104754.jpg +Places365_test_00104757.jpg +Places365_test_00104773.jpg +Places365_test_00104791.jpg +Places365_test_00104796.jpg +Places365_test_00104837.jpg +Places365_test_00104840.jpg +Places365_test_00104846.jpg +Places365_test_00104853.jpg +Places365_test_00104862.jpg +Places365_test_00104888.jpg +Places365_test_00104895.jpg +Places365_test_00104897.jpg +Places365_test_00104931.jpg +Places365_test_00104938.jpg +Places365_test_00104942.jpg +Places365_test_00104952.jpg +Places365_test_00104956.jpg +Places365_test_00104971.jpg +Places365_test_00104978.jpg +Places365_test_00104984.jpg +Places365_test_00104988.jpg +Places365_test_00104997.jpg +Places365_test_00105022.jpg +Places365_test_00105032.jpg +Places365_test_00105040.jpg +Places365_test_00105087.jpg +Places365_test_00105089.jpg +Places365_test_00105103.jpg +Places365_test_00105105.jpg +Places365_test_00105113.jpg +Places365_test_00105127.jpg +Places365_test_00105135.jpg +Places365_test_00105150.jpg +Places365_test_00105164.jpg +Places365_test_00105183.jpg +Places365_test_00105186.jpg +Places365_test_00105189.jpg +Places365_test_00105200.jpg +Places365_test_00105226.jpg +Places365_test_00105237.jpg +Places365_test_00105238.jpg +Places365_test_00105251.jpg +Places365_test_00105262.jpg +Places365_test_00105269.jpg +Places365_test_00105270.jpg +Places365_test_00105282.jpg +Places365_test_00105283.jpg +Places365_test_00105296.jpg +Places365_test_00105297.jpg +Places365_test_00105306.jpg +Places365_test_00105316.jpg +Places365_test_00105343.jpg +Places365_test_00105357.jpg +Places365_test_00105369.jpg +Places365_test_00105380.jpg +Places365_test_00105383.jpg +Places365_test_00105413.jpg +Places365_test_00105422.jpg +Places365_test_00105436.jpg +Places365_test_00105441.jpg +Places365_test_00105444.jpg +Places365_test_00105457.jpg +Places365_test_00105458.jpg +Places365_test_00105464.jpg +Places365_test_00105467.jpg 
+Places365_test_00105482.jpg +Places365_test_00105483.jpg +Places365_test_00105551.jpg +Places365_test_00105558.jpg +Places365_test_00105565.jpg +Places365_test_00105569.jpg +Places365_test_00105580.jpg +Places365_test_00105583.jpg +Places365_test_00105585.jpg +Places365_test_00105594.jpg +Places365_test_00105625.jpg +Places365_test_00105628.jpg +Places365_test_00105633.jpg +Places365_test_00105647.jpg +Places365_test_00105672.jpg +Places365_test_00105710.jpg +Places365_test_00105713.jpg +Places365_test_00105717.jpg +Places365_test_00105720.jpg +Places365_test_00105724.jpg +Places365_test_00105727.jpg +Places365_test_00105728.jpg +Places365_test_00105741.jpg +Places365_test_00105749.jpg +Places365_test_00105756.jpg +Places365_test_00105760.jpg +Places365_test_00105767.jpg +Places365_test_00105772.jpg +Places365_test_00105776.jpg +Places365_test_00105788.jpg +Places365_test_00105793.jpg +Places365_test_00105817.jpg +Places365_test_00105821.jpg +Places365_test_00105824.jpg +Places365_test_00105825.jpg +Places365_test_00105828.jpg +Places365_test_00105836.jpg +Places365_test_00105851.jpg +Places365_test_00105865.jpg +Places365_test_00105886.jpg +Places365_test_00105887.jpg +Places365_test_00105903.jpg +Places365_test_00105922.jpg +Places365_test_00105931.jpg +Places365_test_00105938.jpg +Places365_test_00105954.jpg +Places365_test_00105970.jpg +Places365_test_00105985.jpg +Places365_test_00105989.jpg +Places365_test_00105991.jpg +Places365_test_00105995.jpg +Places365_test_00105996.jpg +Places365_test_00105999.jpg +Places365_test_00106001.jpg +Places365_test_00106011.jpg +Places365_test_00106018.jpg +Places365_test_00106022.jpg +Places365_test_00106047.jpg +Places365_test_00106092.jpg +Places365_test_00106111.jpg +Places365_test_00106126.jpg +Places365_test_00106135.jpg +Places365_test_00106148.jpg +Places365_test_00106152.jpg +Places365_test_00106183.jpg +Places365_test_00106185.jpg +Places365_test_00106192.jpg +Places365_test_00106208.jpg +Places365_test_00106221.jpg +Places365_test_00106231.jpg +Places365_test_00106232.jpg +Places365_test_00106274.jpg +Places365_test_00106284.jpg +Places365_test_00106286.jpg +Places365_test_00106301.jpg +Places365_test_00106313.jpg +Places365_test_00106321.jpg +Places365_test_00106331.jpg +Places365_test_00106334.jpg +Places365_test_00106348.jpg +Places365_test_00106374.jpg +Places365_test_00106376.jpg +Places365_test_00106384.jpg +Places365_test_00106401.jpg +Places365_test_00106440.jpg +Places365_test_00106442.jpg +Places365_test_00106445.jpg +Places365_test_00106465.jpg +Places365_test_00106473.jpg +Places365_test_00106476.jpg +Places365_test_00106482.jpg +Places365_test_00106483.jpg +Places365_test_00106498.jpg +Places365_test_00106514.jpg +Places365_test_00106528.jpg +Places365_test_00106530.jpg +Places365_test_00106551.jpg +Places365_test_00106553.jpg +Places365_test_00106558.jpg +Places365_test_00106559.jpg +Places365_test_00106563.jpg +Places365_test_00106593.jpg +Places365_test_00106596.jpg +Places365_test_00106603.jpg +Places365_test_00106604.jpg +Places365_test_00106609.jpg +Places365_test_00106615.jpg +Places365_test_00106620.jpg +Places365_test_00106628.jpg +Places365_test_00106630.jpg +Places365_test_00106635.jpg +Places365_test_00106661.jpg +Places365_test_00106662.jpg +Places365_test_00106688.jpg +Places365_test_00106702.jpg +Places365_test_00106720.jpg +Places365_test_00106741.jpg +Places365_test_00106765.jpg +Places365_test_00106787.jpg +Places365_test_00106799.jpg +Places365_test_00106891.jpg +Places365_test_00106896.jpg 
+Places365_test_00106899.jpg +Places365_test_00106905.jpg +Places365_test_00106909.jpg +Places365_test_00106912.jpg +Places365_test_00106915.jpg +Places365_test_00106953.jpg +Places365_test_00106959.jpg +Places365_test_00106971.jpg +Places365_test_00106972.jpg +Places365_test_00106977.jpg +Places365_test_00106993.jpg +Places365_test_00107002.jpg +Places365_test_00107006.jpg +Places365_test_00107008.jpg +Places365_test_00107009.jpg +Places365_test_00107039.jpg +Places365_test_00107048.jpg +Places365_test_00107056.jpg +Places365_test_00107069.jpg +Places365_test_00107078.jpg +Places365_test_00107098.jpg +Places365_test_00107117.jpg +Places365_test_00107119.jpg +Places365_test_00107122.jpg +Places365_test_00107127.jpg +Places365_test_00107128.jpg +Places365_test_00107142.jpg +Places365_test_00107144.jpg +Places365_test_00107170.jpg +Places365_test_00107177.jpg +Places365_test_00107184.jpg +Places365_test_00107190.jpg +Places365_test_00107242.jpg +Places365_test_00107250.jpg +Places365_test_00107260.jpg +Places365_test_00107267.jpg +Places365_test_00107282.jpg +Places365_test_00107285.jpg +Places365_test_00107291.jpg +Places365_test_00107339.jpg +Places365_test_00107345.jpg +Places365_test_00107353.jpg +Places365_test_00107365.jpg +Places365_test_00107366.jpg +Places365_test_00107367.jpg +Places365_test_00107369.jpg +Places365_test_00107390.jpg +Places365_test_00107409.jpg +Places365_test_00107425.jpg +Places365_test_00107437.jpg +Places365_test_00107464.jpg +Places365_test_00107486.jpg +Places365_test_00107505.jpg +Places365_test_00107521.jpg +Places365_test_00107527.jpg +Places365_test_00107537.jpg +Places365_test_00107539.jpg +Places365_test_00107546.jpg +Places365_test_00107551.jpg +Places365_test_00107552.jpg +Places365_test_00107563.jpg +Places365_test_00107566.jpg +Places365_test_00107569.jpg +Places365_test_00107574.jpg +Places365_test_00107591.jpg +Places365_test_00107596.jpg +Places365_test_00107624.jpg +Places365_test_00107634.jpg +Places365_test_00107637.jpg +Places365_test_00107639.jpg +Places365_test_00107656.jpg +Places365_test_00107677.jpg +Places365_test_00107691.jpg +Places365_test_00107698.jpg +Places365_test_00107711.jpg +Places365_test_00107723.jpg +Places365_test_00107731.jpg +Places365_test_00107744.jpg +Places365_test_00107765.jpg +Places365_test_00107766.jpg +Places365_test_00107779.jpg +Places365_test_00107781.jpg +Places365_test_00107783.jpg +Places365_test_00107804.jpg +Places365_test_00107821.jpg +Places365_test_00107826.jpg +Places365_test_00107827.jpg +Places365_test_00107833.jpg +Places365_test_00107844.jpg +Places365_test_00107845.jpg +Places365_test_00107853.jpg +Places365_test_00107855.jpg +Places365_test_00107866.jpg +Places365_test_00107870.jpg +Places365_test_00107872.jpg +Places365_test_00107884.jpg +Places365_test_00107892.jpg +Places365_test_00107925.jpg +Places365_test_00107927.jpg +Places365_test_00107928.jpg +Places365_test_00107930.jpg +Places365_test_00107954.jpg +Places365_test_00107956.jpg +Places365_test_00107980.jpg +Places365_test_00107998.jpg +Places365_test_00108002.jpg +Places365_test_00108009.jpg +Places365_test_00108015.jpg +Places365_test_00108029.jpg +Places365_test_00108047.jpg +Places365_test_00108112.jpg +Places365_test_00108121.jpg +Places365_test_00108123.jpg +Places365_test_00108128.jpg +Places365_test_00108155.jpg +Places365_test_00108170.jpg +Places365_test_00108172.jpg +Places365_test_00108180.jpg +Places365_test_00108193.jpg +Places365_test_00108198.jpg +Places365_test_00108211.jpg +Places365_test_00108221.jpg 
+Places365_test_00108231.jpg +Places365_test_00108232.jpg +Places365_test_00108242.jpg +Places365_test_00108245.jpg +Places365_test_00108260.jpg +Places365_test_00108263.jpg +Places365_test_00108280.jpg +Places365_test_00108283.jpg +Places365_test_00108302.jpg +Places365_test_00108308.jpg +Places365_test_00108313.jpg +Places365_test_00108325.jpg +Places365_test_00108331.jpg +Places365_test_00108348.jpg +Places365_test_00108359.jpg +Places365_test_00108363.jpg +Places365_test_00108383.jpg +Places365_test_00108393.jpg +Places365_test_00108402.jpg +Places365_test_00108403.jpg +Places365_test_00108432.jpg +Places365_test_00108434.jpg +Places365_test_00108449.jpg +Places365_test_00108472.jpg +Places365_test_00108489.jpg +Places365_test_00108497.jpg +Places365_test_00108514.jpg +Places365_test_00108549.jpg +Places365_test_00108566.jpg +Places365_test_00108571.jpg +Places365_test_00108573.jpg +Places365_test_00108595.jpg +Places365_test_00108621.jpg +Places365_test_00108632.jpg +Places365_test_00108642.jpg +Places365_test_00108654.jpg +Places365_test_00108664.jpg +Places365_test_00108674.jpg +Places365_test_00108693.jpg +Places365_test_00108715.jpg +Places365_test_00108717.jpg +Places365_test_00108728.jpg +Places365_test_00108730.jpg +Places365_test_00108736.jpg +Places365_test_00108740.jpg +Places365_test_00108767.jpg +Places365_test_00108780.jpg +Places365_test_00108782.jpg +Places365_test_00108786.jpg +Places365_test_00108792.jpg +Places365_test_00108803.jpg +Places365_test_00108820.jpg +Places365_test_00108837.jpg +Places365_test_00108844.jpg +Places365_test_00108845.jpg +Places365_test_00108850.jpg +Places365_test_00108854.jpg +Places365_test_00108856.jpg +Places365_test_00108857.jpg +Places365_test_00108862.jpg +Places365_test_00108876.jpg +Places365_test_00108879.jpg +Places365_test_00108888.jpg +Places365_test_00108897.jpg +Places365_test_00108904.jpg +Places365_test_00108917.jpg +Places365_test_00108920.jpg +Places365_test_00108956.jpg +Places365_test_00108959.jpg +Places365_test_00108965.jpg +Places365_test_00108972.jpg +Places365_test_00108987.jpg +Places365_test_00108991.jpg +Places365_test_00109012.jpg +Places365_test_00109014.jpg +Places365_test_00109017.jpg +Places365_test_00109025.jpg +Places365_test_00109033.jpg +Places365_test_00109056.jpg +Places365_test_00109065.jpg +Places365_test_00109068.jpg +Places365_test_00109075.jpg +Places365_test_00109136.jpg +Places365_test_00109153.jpg +Places365_test_00109158.jpg +Places365_test_00109160.jpg +Places365_test_00109165.jpg +Places365_test_00109195.jpg +Places365_test_00109215.jpg +Places365_test_00109217.jpg +Places365_test_00109230.jpg +Places365_test_00109236.jpg +Places365_test_00109263.jpg +Places365_test_00109268.jpg +Places365_test_00109281.jpg +Places365_test_00109284.jpg +Places365_test_00109287.jpg +Places365_test_00109292.jpg +Places365_test_00109314.jpg +Places365_test_00109317.jpg +Places365_test_00109337.jpg +Places365_test_00109342.jpg +Places365_test_00109350.jpg +Places365_test_00109370.jpg +Places365_test_00109375.jpg +Places365_test_00109397.jpg +Places365_test_00109400.jpg +Places365_test_00109403.jpg +Places365_test_00109426.jpg +Places365_test_00109439.jpg +Places365_test_00109443.jpg +Places365_test_00109462.jpg +Places365_test_00109483.jpg +Places365_test_00109486.jpg +Places365_test_00109489.jpg +Places365_test_00109498.jpg +Places365_test_00109522.jpg +Places365_test_00109523.jpg +Places365_test_00109527.jpg +Places365_test_00109534.jpg +Places365_test_00109557.jpg +Places365_test_00109563.jpg 
+Places365_test_00109564.jpg +Places365_test_00109565.jpg +Places365_test_00109577.jpg +Places365_test_00109614.jpg +Places365_test_00109646.jpg +Places365_test_00109648.jpg +Places365_test_00109661.jpg +Places365_test_00109664.jpg +Places365_test_00109666.jpg +Places365_test_00109674.jpg +Places365_test_00109690.jpg +Places365_test_00109704.jpg +Places365_test_00109707.jpg +Places365_test_00109710.jpg +Places365_test_00109717.jpg +Places365_test_00109726.jpg +Places365_test_00109738.jpg +Places365_test_00109749.jpg +Places365_test_00109754.jpg +Places365_test_00109756.jpg +Places365_test_00109764.jpg +Places365_test_00109773.jpg +Places365_test_00109774.jpg +Places365_test_00109787.jpg +Places365_test_00109796.jpg +Places365_test_00109798.jpg +Places365_test_00109799.jpg +Places365_test_00109808.jpg +Places365_test_00109824.jpg +Places365_test_00109829.jpg +Places365_test_00109840.jpg +Places365_test_00109842.jpg +Places365_test_00109849.jpg +Places365_test_00109852.jpg +Places365_test_00109855.jpg +Places365_test_00109859.jpg +Places365_test_00109860.jpg +Places365_test_00109868.jpg +Places365_test_00109884.jpg +Places365_test_00109899.jpg +Places365_test_00109900.jpg +Places365_test_00109930.jpg +Places365_test_00109936.jpg +Places365_test_00109949.jpg +Places365_test_00109991.jpg +Places365_test_00110000.jpg +Places365_test_00110008.jpg +Places365_test_00110013.jpg +Places365_test_00110034.jpg +Places365_test_00110056.jpg +Places365_test_00110059.jpg +Places365_test_00110060.jpg +Places365_test_00110069.jpg +Places365_test_00110082.jpg +Places365_test_00110083.jpg +Places365_test_00110095.jpg +Places365_test_00110106.jpg +Places365_test_00110112.jpg +Places365_test_00110119.jpg +Places365_test_00110130.jpg +Places365_test_00110141.jpg +Places365_test_00110148.jpg +Places365_test_00110158.jpg +Places365_test_00110178.jpg +Places365_test_00110198.jpg +Places365_test_00110203.jpg +Places365_test_00110210.jpg +Places365_test_00110221.jpg +Places365_test_00110222.jpg +Places365_test_00110223.jpg +Places365_test_00110235.jpg +Places365_test_00110236.jpg +Places365_test_00110253.jpg +Places365_test_00110260.jpg +Places365_test_00110275.jpg +Places365_test_00110289.jpg +Places365_test_00110312.jpg +Places365_test_00110322.jpg +Places365_test_00110357.jpg +Places365_test_00110374.jpg +Places365_test_00110376.jpg +Places365_test_00110379.jpg +Places365_test_00110386.jpg +Places365_test_00110391.jpg +Places365_test_00110394.jpg +Places365_test_00110407.jpg +Places365_test_00110416.jpg +Places365_test_00110433.jpg +Places365_test_00110445.jpg +Places365_test_00110450.jpg +Places365_test_00110452.jpg +Places365_test_00110459.jpg +Places365_test_00110467.jpg +Places365_test_00110479.jpg +Places365_test_00110497.jpg +Places365_test_00110503.jpg +Places365_test_00110521.jpg +Places365_test_00110528.jpg +Places365_test_00110537.jpg +Places365_test_00110540.jpg +Places365_test_00110558.jpg +Places365_test_00110571.jpg +Places365_test_00110577.jpg +Places365_test_00110592.jpg +Places365_test_00110595.jpg +Places365_test_00110599.jpg +Places365_test_00110611.jpg +Places365_test_00110621.jpg +Places365_test_00110623.jpg +Places365_test_00110626.jpg +Places365_test_00110627.jpg +Places365_test_00110631.jpg +Places365_test_00110642.jpg +Places365_test_00110653.jpg +Places365_test_00110658.jpg +Places365_test_00110660.jpg +Places365_test_00110670.jpg +Places365_test_00110714.jpg +Places365_test_00110720.jpg +Places365_test_00110723.jpg +Places365_test_00110732.jpg +Places365_test_00110738.jpg 
+Places365_test_00110749.jpg +Places365_test_00110812.jpg +Places365_test_00110821.jpg +Places365_test_00110849.jpg +Places365_test_00110862.jpg +Places365_test_00110871.jpg +Places365_test_00110874.jpg +Places365_test_00110875.jpg +Places365_test_00110876.jpg +Places365_test_00110881.jpg +Places365_test_00110884.jpg +Places365_test_00110899.jpg +Places365_test_00110907.jpg +Places365_test_00110932.jpg +Places365_test_00110952.jpg +Places365_test_00110956.jpg +Places365_test_00110964.jpg +Places365_test_00110975.jpg +Places365_test_00110985.jpg +Places365_test_00111003.jpg +Places365_test_00111012.jpg +Places365_test_00111070.jpg +Places365_test_00111075.jpg +Places365_test_00111078.jpg +Places365_test_00111084.jpg +Places365_test_00111105.jpg +Places365_test_00111130.jpg +Places365_test_00111132.jpg +Places365_test_00111139.jpg +Places365_test_00111166.jpg +Places365_test_00111167.jpg +Places365_test_00111171.jpg +Places365_test_00111183.jpg +Places365_test_00111184.jpg +Places365_test_00111188.jpg +Places365_test_00111189.jpg +Places365_test_00111191.jpg +Places365_test_00111202.jpg +Places365_test_00111213.jpg +Places365_test_00111220.jpg +Places365_test_00111221.jpg +Places365_test_00111226.jpg +Places365_test_00111240.jpg +Places365_test_00111245.jpg +Places365_test_00111248.jpg +Places365_test_00111260.jpg +Places365_test_00111268.jpg +Places365_test_00111276.jpg +Places365_test_00111312.jpg +Places365_test_00111335.jpg +Places365_test_00111355.jpg +Places365_test_00111363.jpg +Places365_test_00111366.jpg +Places365_test_00111369.jpg +Places365_test_00111374.jpg +Places365_test_00111386.jpg +Places365_test_00111394.jpg +Places365_test_00111406.jpg +Places365_test_00111432.jpg +Places365_test_00111445.jpg +Places365_test_00111469.jpg +Places365_test_00111471.jpg +Places365_test_00111515.jpg +Places365_test_00111529.jpg +Places365_test_00111554.jpg +Places365_test_00111562.jpg +Places365_test_00111590.jpg +Places365_test_00111596.jpg +Places365_test_00111601.jpg +Places365_test_00111605.jpg +Places365_test_00111609.jpg +Places365_test_00111629.jpg +Places365_test_00111641.jpg +Places365_test_00111654.jpg +Places365_test_00111662.jpg +Places365_test_00111678.jpg +Places365_test_00111684.jpg +Places365_test_00111691.jpg +Places365_test_00111706.jpg +Places365_test_00111715.jpg +Places365_test_00111726.jpg +Places365_test_00111728.jpg +Places365_test_00111740.jpg +Places365_test_00111750.jpg +Places365_test_00111763.jpg +Places365_test_00111767.jpg +Places365_test_00111775.jpg +Places365_test_00111780.jpg +Places365_test_00111781.jpg +Places365_test_00111788.jpg +Places365_test_00111795.jpg +Places365_test_00111814.jpg +Places365_test_00111816.jpg +Places365_test_00111829.jpg +Places365_test_00111838.jpg +Places365_test_00111855.jpg +Places365_test_00111857.jpg +Places365_test_00111875.jpg +Places365_test_00111907.jpg +Places365_test_00111911.jpg +Places365_test_00111935.jpg +Places365_test_00111942.jpg +Places365_test_00111947.jpg +Places365_test_00111958.jpg +Places365_test_00111962.jpg +Places365_test_00111964.jpg +Places365_test_00111968.jpg +Places365_test_00111971.jpg +Places365_test_00111973.jpg +Places365_test_00111979.jpg +Places365_test_00111981.jpg +Places365_test_00111994.jpg +Places365_test_00112003.jpg +Places365_test_00112010.jpg +Places365_test_00112037.jpg +Places365_test_00112060.jpg +Places365_test_00112066.jpg +Places365_test_00112069.jpg +Places365_test_00112075.jpg +Places365_test_00112106.jpg +Places365_test_00112113.jpg +Places365_test_00112118.jpg 
+Places365_test_00112147.jpg +Places365_test_00112151.jpg +Places365_test_00112158.jpg +Places365_test_00112174.jpg +Places365_test_00112186.jpg +Places365_test_00112193.jpg +Places365_test_00112195.jpg +Places365_test_00112198.jpg +Places365_test_00112229.jpg +Places365_test_00112240.jpg +Places365_test_00112242.jpg +Places365_test_00112255.jpg +Places365_test_00112258.jpg +Places365_test_00112262.jpg +Places365_test_00112274.jpg +Places365_test_00112278.jpg +Places365_test_00112295.jpg +Places365_test_00112325.jpg +Places365_test_00112346.jpg +Places365_test_00112348.jpg +Places365_test_00112370.jpg +Places365_test_00112375.jpg +Places365_test_00112377.jpg +Places365_test_00112378.jpg +Places365_test_00112387.jpg +Places365_test_00112390.jpg +Places365_test_00112421.jpg +Places365_test_00112427.jpg +Places365_test_00112429.jpg +Places365_test_00112446.jpg +Places365_test_00112449.jpg +Places365_test_00112480.jpg +Places365_test_00112493.jpg +Places365_test_00112529.jpg +Places365_test_00112538.jpg +Places365_test_00112554.jpg +Places365_test_00112563.jpg +Places365_test_00112574.jpg +Places365_test_00112575.jpg +Places365_test_00112577.jpg +Places365_test_00112592.jpg +Places365_test_00112595.jpg +Places365_test_00112603.jpg +Places365_test_00112628.jpg +Places365_test_00112636.jpg +Places365_test_00112641.jpg +Places365_test_00112652.jpg +Places365_test_00112657.jpg +Places365_test_00112672.jpg +Places365_test_00112693.jpg +Places365_test_00112703.jpg +Places365_test_00112709.jpg +Places365_test_00112725.jpg +Places365_test_00112726.jpg +Places365_test_00112731.jpg +Places365_test_00112741.jpg +Places365_test_00112751.jpg +Places365_test_00112761.jpg +Places365_test_00112765.jpg +Places365_test_00112767.jpg +Places365_test_00112768.jpg +Places365_test_00112775.jpg +Places365_test_00112788.jpg +Places365_test_00112798.jpg +Places365_test_00112801.jpg +Places365_test_00112808.jpg +Places365_test_00112817.jpg +Places365_test_00112830.jpg +Places365_test_00112845.jpg +Places365_test_00112860.jpg +Places365_test_00112866.jpg +Places365_test_00112867.jpg +Places365_test_00112869.jpg +Places365_test_00112894.jpg +Places365_test_00112899.jpg +Places365_test_00112912.jpg +Places365_test_00112920.jpg +Places365_test_00112929.jpg +Places365_test_00112937.jpg +Places365_test_00112944.jpg +Places365_test_00112949.jpg +Places365_test_00112964.jpg +Places365_test_00112975.jpg +Places365_test_00112981.jpg +Places365_test_00112989.jpg +Places365_test_00112990.jpg +Places365_test_00113000.jpg +Places365_test_00113009.jpg +Places365_test_00113015.jpg +Places365_test_00113018.jpg +Places365_test_00113033.jpg +Places365_test_00113034.jpg +Places365_test_00113038.jpg +Places365_test_00113053.jpg +Places365_test_00113064.jpg +Places365_test_00113075.jpg +Places365_test_00113085.jpg +Places365_test_00113111.jpg +Places365_test_00113113.jpg +Places365_test_00113122.jpg +Places365_test_00113123.jpg +Places365_test_00113153.jpg +Places365_test_00113163.jpg +Places365_test_00113171.jpg +Places365_test_00113173.jpg +Places365_test_00113188.jpg +Places365_test_00113207.jpg +Places365_test_00113214.jpg +Places365_test_00113222.jpg +Places365_test_00113230.jpg +Places365_test_00113232.jpg +Places365_test_00113243.jpg +Places365_test_00113251.jpg +Places365_test_00113266.jpg +Places365_test_00113269.jpg +Places365_test_00113305.jpg +Places365_test_00113314.jpg +Places365_test_00113335.jpg +Places365_test_00113359.jpg +Places365_test_00113374.jpg +Places365_test_00113380.jpg +Places365_test_00113394.jpg 
+Places365_test_00113422.jpg +Places365_test_00113423.jpg +Places365_test_00113431.jpg +Places365_test_00113441.jpg +Places365_test_00113446.jpg +Places365_test_00113469.jpg +Places365_test_00113478.jpg +Places365_test_00113480.jpg +Places365_test_00113481.jpg +Places365_test_00113521.jpg +Places365_test_00113525.jpg +Places365_test_00113529.jpg +Places365_test_00113531.jpg +Places365_test_00113544.jpg +Places365_test_00113554.jpg +Places365_test_00113575.jpg +Places365_test_00113590.jpg +Places365_test_00113612.jpg +Places365_test_00113614.jpg +Places365_test_00113629.jpg +Places365_test_00113634.jpg +Places365_test_00113636.jpg +Places365_test_00113643.jpg +Places365_test_00113667.jpg +Places365_test_00113685.jpg +Places365_test_00113693.jpg +Places365_test_00113723.jpg +Places365_test_00113734.jpg +Places365_test_00113736.jpg +Places365_test_00113781.jpg +Places365_test_00113786.jpg +Places365_test_00113806.jpg +Places365_test_00113809.jpg +Places365_test_00113813.jpg +Places365_test_00113823.jpg +Places365_test_00113833.jpg +Places365_test_00113835.jpg +Places365_test_00113844.jpg +Places365_test_00113853.jpg +Places365_test_00113859.jpg +Places365_test_00113867.jpg +Places365_test_00113869.jpg +Places365_test_00113883.jpg +Places365_test_00113884.jpg +Places365_test_00113888.jpg +Places365_test_00113890.jpg +Places365_test_00113901.jpg +Places365_test_00113906.jpg +Places365_test_00113924.jpg +Places365_test_00113930.jpg +Places365_test_00113942.jpg +Places365_test_00113957.jpg +Places365_test_00113982.jpg +Places365_test_00113988.jpg +Places365_test_00113991.jpg +Places365_test_00114002.jpg +Places365_test_00114003.jpg +Places365_test_00114031.jpg +Places365_test_00114036.jpg +Places365_test_00114047.jpg +Places365_test_00114067.jpg +Places365_test_00114070.jpg +Places365_test_00114072.jpg +Places365_test_00114121.jpg +Places365_test_00114152.jpg +Places365_test_00114169.jpg +Places365_test_00114173.jpg +Places365_test_00114183.jpg +Places365_test_00114194.jpg +Places365_test_00114226.jpg +Places365_test_00114238.jpg +Places365_test_00114240.jpg +Places365_test_00114274.jpg +Places365_test_00114280.jpg +Places365_test_00114283.jpg +Places365_test_00114288.jpg +Places365_test_00114289.jpg +Places365_test_00114305.jpg +Places365_test_00114329.jpg +Places365_test_00114343.jpg +Places365_test_00114352.jpg +Places365_test_00114353.jpg +Places365_test_00114358.jpg +Places365_test_00114360.jpg +Places365_test_00114363.jpg +Places365_test_00114364.jpg +Places365_test_00114384.jpg +Places365_test_00114399.jpg +Places365_test_00114407.jpg +Places365_test_00114408.jpg +Places365_test_00114413.jpg +Places365_test_00114419.jpg +Places365_test_00114427.jpg +Places365_test_00114431.jpg +Places365_test_00114434.jpg +Places365_test_00114454.jpg +Places365_test_00114461.jpg +Places365_test_00114465.jpg +Places365_test_00114468.jpg +Places365_test_00114470.jpg +Places365_test_00114474.jpg +Places365_test_00114475.jpg +Places365_test_00114486.jpg +Places365_test_00114489.jpg +Places365_test_00114494.jpg +Places365_test_00114504.jpg +Places365_test_00114507.jpg +Places365_test_00114514.jpg +Places365_test_00114520.jpg +Places365_test_00114534.jpg +Places365_test_00114538.jpg +Places365_test_00114551.jpg +Places365_test_00114556.jpg +Places365_test_00114558.jpg +Places365_test_00114562.jpg +Places365_test_00114566.jpg +Places365_test_00114585.jpg +Places365_test_00114594.jpg +Places365_test_00114607.jpg +Places365_test_00114609.jpg +Places365_test_00114626.jpg +Places365_test_00114637.jpg 
+Places365_test_00114639.jpg +Places365_test_00114643.jpg +Places365_test_00114645.jpg +Places365_test_00114647.jpg +Places365_test_00114664.jpg +Places365_test_00114684.jpg +Places365_test_00114715.jpg +Places365_test_00114723.jpg +Places365_test_00114732.jpg +Places365_test_00114755.jpg +Places365_test_00114762.jpg +Places365_test_00114767.jpg +Places365_test_00114771.jpg +Places365_test_00114792.jpg +Places365_test_00114805.jpg +Places365_test_00114833.jpg +Places365_test_00114848.jpg +Places365_test_00114856.jpg +Places365_test_00114858.jpg +Places365_test_00114867.jpg +Places365_test_00114872.jpg +Places365_test_00114876.jpg +Places365_test_00114890.jpg +Places365_test_00114891.jpg +Places365_test_00114892.jpg +Places365_test_00114903.jpg +Places365_test_00114905.jpg +Places365_test_00114913.jpg +Places365_test_00114918.jpg +Places365_test_00114928.jpg +Places365_test_00114932.jpg +Places365_test_00114939.jpg +Places365_test_00114942.jpg +Places365_test_00114944.jpg +Places365_test_00114960.jpg +Places365_test_00114973.jpg +Places365_test_00114986.jpg +Places365_test_00114997.jpg +Places365_test_00114998.jpg +Places365_test_00115018.jpg +Places365_test_00115049.jpg +Places365_test_00115058.jpg +Places365_test_00115081.jpg +Places365_test_00115115.jpg +Places365_test_00115119.jpg +Places365_test_00115130.jpg +Places365_test_00115149.jpg +Places365_test_00115178.jpg +Places365_test_00115198.jpg +Places365_test_00115217.jpg +Places365_test_00115226.jpg +Places365_test_00115235.jpg +Places365_test_00115236.jpg +Places365_test_00115241.jpg +Places365_test_00115243.jpg +Places365_test_00115263.jpg +Places365_test_00115277.jpg +Places365_test_00115292.jpg +Places365_test_00115294.jpg +Places365_test_00115300.jpg +Places365_test_00115302.jpg +Places365_test_00115315.jpg +Places365_test_00115321.jpg +Places365_test_00115335.jpg +Places365_test_00115339.jpg +Places365_test_00115354.jpg +Places365_test_00115360.jpg +Places365_test_00115364.jpg +Places365_test_00115365.jpg +Places365_test_00115366.jpg +Places365_test_00115377.jpg +Places365_test_00115388.jpg +Places365_test_00115389.jpg +Places365_test_00115392.jpg +Places365_test_00115397.jpg +Places365_test_00115406.jpg +Places365_test_00115410.jpg +Places365_test_00115437.jpg +Places365_test_00115452.jpg +Places365_test_00115458.jpg +Places365_test_00115460.jpg +Places365_test_00115480.jpg +Places365_test_00115488.jpg +Places365_test_00115491.jpg +Places365_test_00115494.jpg +Places365_test_00115495.jpg +Places365_test_00115511.jpg +Places365_test_00115512.jpg +Places365_test_00115523.jpg +Places365_test_00115524.jpg +Places365_test_00115531.jpg +Places365_test_00115564.jpg +Places365_test_00115565.jpg +Places365_test_00115583.jpg +Places365_test_00115584.jpg +Places365_test_00115587.jpg +Places365_test_00115596.jpg +Places365_test_00115601.jpg +Places365_test_00115618.jpg +Places365_test_00115637.jpg +Places365_test_00115638.jpg +Places365_test_00115640.jpg +Places365_test_00115659.jpg +Places365_test_00115664.jpg +Places365_test_00115670.jpg +Places365_test_00115682.jpg +Places365_test_00115688.jpg +Places365_test_00115696.jpg +Places365_test_00115698.jpg +Places365_test_00115721.jpg +Places365_test_00115726.jpg +Places365_test_00115745.jpg +Places365_test_00115749.jpg +Places365_test_00115756.jpg +Places365_test_00115768.jpg +Places365_test_00115779.jpg +Places365_test_00115786.jpg +Places365_test_00115787.jpg +Places365_test_00115817.jpg +Places365_test_00115829.jpg +Places365_test_00115842.jpg +Places365_test_00115848.jpg 
+Places365_test_00115864.jpg +Places365_test_00115868.jpg +Places365_test_00115898.jpg +Places365_test_00115915.jpg +Places365_test_00115916.jpg +Places365_test_00115952.jpg +Places365_test_00115955.jpg +Places365_test_00115960.jpg +Places365_test_00115979.jpg +Places365_test_00115981.jpg +Places365_test_00115985.jpg +Places365_test_00115993.jpg +Places365_test_00116008.jpg +Places365_test_00116011.jpg +Places365_test_00116015.jpg +Places365_test_00116025.jpg +Places365_test_00116033.jpg +Places365_test_00116069.jpg +Places365_test_00116070.jpg +Places365_test_00116081.jpg +Places365_test_00116089.jpg +Places365_test_00116090.jpg +Places365_test_00116098.jpg +Places365_test_00116102.jpg +Places365_test_00116103.jpg +Places365_test_00116107.jpg +Places365_test_00116114.jpg +Places365_test_00116121.jpg +Places365_test_00116137.jpg +Places365_test_00116164.jpg +Places365_test_00116179.jpg +Places365_test_00116200.jpg +Places365_test_00116206.jpg +Places365_test_00116214.jpg +Places365_test_00116216.jpg +Places365_test_00116223.jpg +Places365_test_00116228.jpg +Places365_test_00116242.jpg +Places365_test_00116253.jpg +Places365_test_00116258.jpg +Places365_test_00116261.jpg +Places365_test_00116269.jpg +Places365_test_00116271.jpg +Places365_test_00116283.jpg +Places365_test_00116288.jpg +Places365_test_00116293.jpg +Places365_test_00116295.jpg +Places365_test_00116302.jpg +Places365_test_00116309.jpg +Places365_test_00116329.jpg +Places365_test_00116338.jpg +Places365_test_00116339.jpg +Places365_test_00116348.jpg +Places365_test_00116382.jpg +Places365_test_00116386.jpg +Places365_test_00116414.jpg +Places365_test_00116423.jpg +Places365_test_00116449.jpg +Places365_test_00116453.jpg +Places365_test_00116456.jpg +Places365_test_00116467.jpg +Places365_test_00116491.jpg +Places365_test_00116516.jpg +Places365_test_00116523.jpg +Places365_test_00116525.jpg +Places365_test_00116534.jpg +Places365_test_00116540.jpg +Places365_test_00116552.jpg +Places365_test_00116556.jpg +Places365_test_00116567.jpg +Places365_test_00116569.jpg +Places365_test_00116577.jpg +Places365_test_00116586.jpg +Places365_test_00116601.jpg +Places365_test_00116620.jpg +Places365_test_00116625.jpg +Places365_test_00116629.jpg +Places365_test_00116642.jpg +Places365_test_00116664.jpg +Places365_test_00116679.jpg +Places365_test_00116708.jpg +Places365_test_00116728.jpg +Places365_test_00116746.jpg +Places365_test_00116753.jpg +Places365_test_00116767.jpg +Places365_test_00116781.jpg +Places365_test_00116786.jpg +Places365_test_00116822.jpg +Places365_test_00116823.jpg +Places365_test_00116828.jpg +Places365_test_00116831.jpg +Places365_test_00116846.jpg +Places365_test_00116853.jpg +Places365_test_00116863.jpg +Places365_test_00116894.jpg +Places365_test_00116897.jpg +Places365_test_00116899.jpg +Places365_test_00116916.jpg +Places365_test_00116927.jpg +Places365_test_00116929.jpg +Places365_test_00116932.jpg +Places365_test_00116943.jpg +Places365_test_00116944.jpg +Places365_test_00116960.jpg +Places365_test_00116966.jpg +Places365_test_00116978.jpg +Places365_test_00116986.jpg +Places365_test_00116996.jpg +Places365_test_00117008.jpg +Places365_test_00117034.jpg +Places365_test_00117037.jpg +Places365_test_00117071.jpg +Places365_test_00117079.jpg +Places365_test_00117084.jpg +Places365_test_00117101.jpg +Places365_test_00117102.jpg +Places365_test_00117104.jpg +Places365_test_00117108.jpg +Places365_test_00117111.jpg +Places365_test_00117135.jpg +Places365_test_00117137.jpg +Places365_test_00117141.jpg 
+Places365_test_00117148.jpg +Places365_test_00117163.jpg +Places365_test_00117165.jpg +Places365_test_00117187.jpg +Places365_test_00117197.jpg +Places365_test_00117204.jpg +Places365_test_00117215.jpg +Places365_test_00117222.jpg +Places365_test_00117224.jpg +Places365_test_00117231.jpg +Places365_test_00117269.jpg +Places365_test_00117287.jpg +Places365_test_00117291.jpg +Places365_test_00117302.jpg +Places365_test_00117304.jpg +Places365_test_00117314.jpg +Places365_test_00117328.jpg +Places365_test_00117334.jpg +Places365_test_00117338.jpg +Places365_test_00117350.jpg +Places365_test_00117371.jpg +Places365_test_00117378.jpg +Places365_test_00117398.jpg +Places365_test_00117427.jpg +Places365_test_00117432.jpg +Places365_test_00117455.jpg +Places365_test_00117461.jpg +Places365_test_00117463.jpg +Places365_test_00117483.jpg +Places365_test_00117494.jpg +Places365_test_00117495.jpg +Places365_test_00117500.jpg +Places365_test_00117543.jpg +Places365_test_00117554.jpg +Places365_test_00117556.jpg +Places365_test_00117563.jpg +Places365_test_00117578.jpg +Places365_test_00117581.jpg +Places365_test_00117596.jpg +Places365_test_00117618.jpg +Places365_test_00117625.jpg +Places365_test_00117628.jpg +Places365_test_00117638.jpg +Places365_test_00117687.jpg +Places365_test_00117691.jpg +Places365_test_00117703.jpg +Places365_test_00117704.jpg +Places365_test_00117713.jpg +Places365_test_00117722.jpg +Places365_test_00117732.jpg +Places365_test_00117741.jpg +Places365_test_00117760.jpg +Places365_test_00117777.jpg +Places365_test_00117782.jpg +Places365_test_00117797.jpg +Places365_test_00117819.jpg +Places365_test_00117821.jpg +Places365_test_00117822.jpg +Places365_test_00117828.jpg +Places365_test_00117847.jpg +Places365_test_00117859.jpg +Places365_test_00117868.jpg +Places365_test_00117884.jpg +Places365_test_00117893.jpg +Places365_test_00117895.jpg +Places365_test_00117900.jpg +Places365_test_00117910.jpg +Places365_test_00117931.jpg +Places365_test_00117950.jpg +Places365_test_00117955.jpg +Places365_test_00117963.jpg +Places365_test_00117975.jpg +Places365_test_00117982.jpg +Places365_test_00117993.jpg +Places365_test_00118003.jpg +Places365_test_00118008.jpg +Places365_test_00118012.jpg +Places365_test_00118021.jpg +Places365_test_00118022.jpg +Places365_test_00118034.jpg +Places365_test_00118046.jpg +Places365_test_00118077.jpg +Places365_test_00118099.jpg +Places365_test_00118119.jpg +Places365_test_00118123.jpg +Places365_test_00118129.jpg +Places365_test_00118132.jpg +Places365_test_00118136.jpg +Places365_test_00118140.jpg +Places365_test_00118148.jpg +Places365_test_00118150.jpg +Places365_test_00118170.jpg +Places365_test_00118174.jpg +Places365_test_00118183.jpg +Places365_test_00118185.jpg +Places365_test_00118191.jpg +Places365_test_00118192.jpg +Places365_test_00118194.jpg +Places365_test_00118195.jpg +Places365_test_00118203.jpg +Places365_test_00118209.jpg +Places365_test_00118220.jpg +Places365_test_00118223.jpg +Places365_test_00118248.jpg +Places365_test_00118251.jpg +Places365_test_00118261.jpg +Places365_test_00118292.jpg +Places365_test_00118293.jpg +Places365_test_00118308.jpg +Places365_test_00118313.jpg +Places365_test_00118317.jpg +Places365_test_00118326.jpg +Places365_test_00118335.jpg +Places365_test_00118363.jpg +Places365_test_00118368.jpg +Places365_test_00118372.jpg +Places365_test_00118384.jpg +Places365_test_00118388.jpg +Places365_test_00118414.jpg +Places365_test_00118437.jpg +Places365_test_00118442.jpg +Places365_test_00118448.jpg 
+Places365_test_00118458.jpg +Places365_test_00118481.jpg +Places365_test_00118504.jpg +Places365_test_00118506.jpg +Places365_test_00118530.jpg +Places365_test_00118555.jpg +Places365_test_00118558.jpg +Places365_test_00118571.jpg +Places365_test_00118575.jpg +Places365_test_00118586.jpg +Places365_test_00118590.jpg +Places365_test_00118601.jpg +Places365_test_00118608.jpg +Places365_test_00118645.jpg +Places365_test_00118650.jpg +Places365_test_00118656.jpg +Places365_test_00118659.jpg +Places365_test_00118661.jpg +Places365_test_00118665.jpg +Places365_test_00118670.jpg +Places365_test_00118680.jpg +Places365_test_00118684.jpg +Places365_test_00118686.jpg +Places365_test_00118688.jpg +Places365_test_00118693.jpg +Places365_test_00118695.jpg +Places365_test_00118697.jpg +Places365_test_00118701.jpg +Places365_test_00118708.jpg +Places365_test_00118717.jpg +Places365_test_00118738.jpg +Places365_test_00118745.jpg +Places365_test_00118752.jpg +Places365_test_00118762.jpg +Places365_test_00118788.jpg +Places365_test_00118817.jpg +Places365_test_00118881.jpg +Places365_test_00118884.jpg +Places365_test_00118891.jpg +Places365_test_00118902.jpg +Places365_test_00118903.jpg +Places365_test_00118923.jpg +Places365_test_00118942.jpg +Places365_test_00118955.jpg +Places365_test_00118957.jpg +Places365_test_00118958.jpg +Places365_test_00118973.jpg +Places365_test_00118976.jpg +Places365_test_00118991.jpg +Places365_test_00119011.jpg +Places365_test_00119019.jpg +Places365_test_00119020.jpg +Places365_test_00119028.jpg +Places365_test_00119045.jpg +Places365_test_00119063.jpg +Places365_test_00119073.jpg +Places365_test_00119077.jpg +Places365_test_00119089.jpg +Places365_test_00119111.jpg +Places365_test_00119117.jpg +Places365_test_00119121.jpg +Places365_test_00119136.jpg +Places365_test_00119151.jpg +Places365_test_00119171.jpg +Places365_test_00119172.jpg +Places365_test_00119188.jpg +Places365_test_00119224.jpg +Places365_test_00119234.jpg +Places365_test_00119236.jpg +Places365_test_00119260.jpg +Places365_test_00119267.jpg +Places365_test_00119275.jpg +Places365_test_00119296.jpg +Places365_test_00119312.jpg +Places365_test_00119333.jpg +Places365_test_00119350.jpg +Places365_test_00119353.jpg +Places365_test_00119368.jpg +Places365_test_00119372.jpg +Places365_test_00119410.jpg +Places365_test_00119420.jpg +Places365_test_00119433.jpg +Places365_test_00119440.jpg +Places365_test_00119459.jpg +Places365_test_00119461.jpg +Places365_test_00119463.jpg +Places365_test_00119469.jpg +Places365_test_00119470.jpg +Places365_test_00119474.jpg +Places365_test_00119492.jpg +Places365_test_00119499.jpg +Places365_test_00119528.jpg +Places365_test_00119542.jpg +Places365_test_00119551.jpg +Places365_test_00119569.jpg +Places365_test_00119573.jpg +Places365_test_00119603.jpg +Places365_test_00119611.jpg +Places365_test_00119612.jpg +Places365_test_00119621.jpg +Places365_test_00119625.jpg +Places365_test_00119632.jpg +Places365_test_00119661.jpg +Places365_test_00119663.jpg +Places365_test_00119665.jpg +Places365_test_00119703.jpg +Places365_test_00119708.jpg +Places365_test_00119719.jpg +Places365_test_00119742.jpg +Places365_test_00119746.jpg +Places365_test_00119747.jpg +Places365_test_00119751.jpg +Places365_test_00119777.jpg +Places365_test_00119778.jpg +Places365_test_00119788.jpg +Places365_test_00119795.jpg +Places365_test_00119800.jpg +Places365_test_00119807.jpg +Places365_test_00119883.jpg +Places365_test_00119889.jpg +Places365_test_00119890.jpg +Places365_test_00119921.jpg 
+Places365_test_00119927.jpg +Places365_test_00119931.jpg +Places365_test_00119935.jpg +Places365_test_00119940.jpg +Places365_test_00119946.jpg +Places365_test_00119950.jpg +Places365_test_00119952.jpg +Places365_test_00119982.jpg +Places365_test_00119988.jpg +Places365_test_00119991.jpg +Places365_test_00119994.jpg +Places365_test_00120006.jpg +Places365_test_00120017.jpg +Places365_test_00120056.jpg +Places365_test_00120062.jpg +Places365_test_00120069.jpg +Places365_test_00120073.jpg +Places365_test_00120102.jpg +Places365_test_00120113.jpg +Places365_test_00120133.jpg +Places365_test_00120137.jpg +Places365_test_00120146.jpg +Places365_test_00120149.jpg +Places365_test_00120199.jpg +Places365_test_00120206.jpg +Places365_test_00120210.jpg +Places365_test_00120217.jpg +Places365_test_00120219.jpg +Places365_test_00120226.jpg +Places365_test_00120229.jpg +Places365_test_00120255.jpg +Places365_test_00120261.jpg +Places365_test_00120292.jpg +Places365_test_00120293.jpg +Places365_test_00120304.jpg +Places365_test_00120307.jpg +Places365_test_00120315.jpg +Places365_test_00120319.jpg +Places365_test_00120331.jpg +Places365_test_00120354.jpg +Places365_test_00120355.jpg +Places365_test_00120363.jpg +Places365_test_00120365.jpg +Places365_test_00120370.jpg +Places365_test_00120382.jpg +Places365_test_00120389.jpg +Places365_test_00120393.jpg +Places365_test_00120405.jpg +Places365_test_00120407.jpg +Places365_test_00120413.jpg +Places365_test_00120446.jpg +Places365_test_00120453.jpg +Places365_test_00120469.jpg +Places365_test_00120470.jpg +Places365_test_00120471.jpg +Places365_test_00120484.jpg +Places365_test_00120493.jpg +Places365_test_00120513.jpg +Places365_test_00120519.jpg +Places365_test_00120527.jpg +Places365_test_00120537.jpg +Places365_test_00120552.jpg +Places365_test_00120556.jpg +Places365_test_00120559.jpg +Places365_test_00120563.jpg +Places365_test_00120573.jpg +Places365_test_00120579.jpg +Places365_test_00120582.jpg +Places365_test_00120585.jpg +Places365_test_00120586.jpg +Places365_test_00120587.jpg +Places365_test_00120609.jpg +Places365_test_00120612.jpg +Places365_test_00120614.jpg +Places365_test_00120617.jpg +Places365_test_00120630.jpg +Places365_test_00120634.jpg +Places365_test_00120650.jpg +Places365_test_00120654.jpg +Places365_test_00120660.jpg +Places365_test_00120668.jpg +Places365_test_00120677.jpg +Places365_test_00120689.jpg +Places365_test_00120692.jpg +Places365_test_00120710.jpg +Places365_test_00120718.jpg +Places365_test_00120725.jpg +Places365_test_00120740.jpg +Places365_test_00120754.jpg +Places365_test_00120759.jpg +Places365_test_00120774.jpg +Places365_test_00120778.jpg +Places365_test_00120779.jpg +Places365_test_00120791.jpg +Places365_test_00120799.jpg +Places365_test_00120814.jpg +Places365_test_00120817.jpg +Places365_test_00120839.jpg +Places365_test_00120857.jpg +Places365_test_00120878.jpg +Places365_test_00120882.jpg +Places365_test_00120887.jpg +Places365_test_00120902.jpg +Places365_test_00120909.jpg +Places365_test_00120915.jpg +Places365_test_00120925.jpg +Places365_test_00120933.jpg +Places365_test_00120963.jpg +Places365_test_00120991.jpg +Places365_test_00121007.jpg +Places365_test_00121011.jpg +Places365_test_00121019.jpg +Places365_test_00121028.jpg +Places365_test_00121047.jpg +Places365_test_00121051.jpg +Places365_test_00121090.jpg +Places365_test_00121091.jpg +Places365_test_00121095.jpg +Places365_test_00121097.jpg +Places365_test_00121130.jpg +Places365_test_00121135.jpg +Places365_test_00121139.jpg 
+Places365_test_00121152.jpg +Places365_test_00121161.jpg +Places365_test_00121168.jpg +Places365_test_00121172.jpg +Places365_test_00121196.jpg +Places365_test_00121197.jpg +Places365_test_00121207.jpg +Places365_test_00121209.jpg +Places365_test_00121218.jpg +Places365_test_00121241.jpg +Places365_test_00121247.jpg +Places365_test_00121255.jpg +Places365_test_00121270.jpg +Places365_test_00121275.jpg +Places365_test_00121291.jpg +Places365_test_00121311.jpg +Places365_test_00121326.jpg +Places365_test_00121332.jpg +Places365_test_00121342.jpg +Places365_test_00121356.jpg +Places365_test_00121358.jpg +Places365_test_00121379.jpg +Places365_test_00121396.jpg +Places365_test_00121442.jpg +Places365_test_00121454.jpg +Places365_test_00121472.jpg +Places365_test_00121477.jpg +Places365_test_00121487.jpg +Places365_test_00121489.jpg +Places365_test_00121494.jpg +Places365_test_00121498.jpg +Places365_test_00121506.jpg +Places365_test_00121511.jpg +Places365_test_00121531.jpg +Places365_test_00121533.jpg +Places365_test_00121550.jpg +Places365_test_00121563.jpg +Places365_test_00121583.jpg +Places365_test_00121591.jpg +Places365_test_00121602.jpg +Places365_test_00121615.jpg +Places365_test_00121618.jpg +Places365_test_00121620.jpg +Places365_test_00121623.jpg +Places365_test_00121658.jpg +Places365_test_00121664.jpg +Places365_test_00121665.jpg +Places365_test_00121667.jpg +Places365_test_00121691.jpg +Places365_test_00121695.jpg +Places365_test_00121715.jpg +Places365_test_00121718.jpg +Places365_test_00121726.jpg +Places365_test_00121762.jpg +Places365_test_00121768.jpg +Places365_test_00121776.jpg +Places365_test_00121783.jpg +Places365_test_00121811.jpg +Places365_test_00121812.jpg +Places365_test_00121818.jpg +Places365_test_00121831.jpg +Places365_test_00121832.jpg +Places365_test_00121834.jpg +Places365_test_00121889.jpg +Places365_test_00121906.jpg +Places365_test_00121917.jpg +Places365_test_00121935.jpg +Places365_test_00121937.jpg +Places365_test_00121948.jpg +Places365_test_00121952.jpg +Places365_test_00121959.jpg +Places365_test_00121973.jpg +Places365_test_00122006.jpg +Places365_test_00122015.jpg +Places365_test_00122019.jpg +Places365_test_00122035.jpg +Places365_test_00122041.jpg +Places365_test_00122045.jpg +Places365_test_00122048.jpg +Places365_test_00122051.jpg +Places365_test_00122064.jpg +Places365_test_00122095.jpg +Places365_test_00122099.jpg +Places365_test_00122108.jpg +Places365_test_00122120.jpg +Places365_test_00122131.jpg +Places365_test_00122136.jpg +Places365_test_00122141.jpg +Places365_test_00122150.jpg +Places365_test_00122154.jpg +Places365_test_00122155.jpg +Places365_test_00122161.jpg +Places365_test_00122198.jpg +Places365_test_00122212.jpg +Places365_test_00122260.jpg +Places365_test_00122265.jpg +Places365_test_00122272.jpg +Places365_test_00122274.jpg +Places365_test_00122281.jpg +Places365_test_00122287.jpg +Places365_test_00122288.jpg +Places365_test_00122299.jpg +Places365_test_00122300.jpg +Places365_test_00122318.jpg +Places365_test_00122329.jpg +Places365_test_00122333.jpg +Places365_test_00122336.jpg +Places365_test_00122342.jpg +Places365_test_00122345.jpg +Places365_test_00122348.jpg +Places365_test_00122349.jpg +Places365_test_00122354.jpg +Places365_test_00122376.jpg +Places365_test_00122384.jpg +Places365_test_00122387.jpg +Places365_test_00122393.jpg +Places365_test_00122436.jpg +Places365_test_00122463.jpg +Places365_test_00122465.jpg +Places365_test_00122475.jpg +Places365_test_00122502.jpg +Places365_test_00122503.jpg 
+Places365_test_00122517.jpg +Places365_test_00122544.jpg +Places365_test_00122545.jpg +Places365_test_00122556.jpg +Places365_test_00122579.jpg +Places365_test_00122588.jpg +Places365_test_00122590.jpg +Places365_test_00122595.jpg +Places365_test_00122596.jpg +Places365_test_00122652.jpg +Places365_test_00122658.jpg +Places365_test_00122662.jpg +Places365_test_00122678.jpg +Places365_test_00122681.jpg +Places365_test_00122692.jpg +Places365_test_00122696.jpg +Places365_test_00122701.jpg +Places365_test_00122717.jpg +Places365_test_00122727.jpg +Places365_test_00122747.jpg +Places365_test_00122750.jpg +Places365_test_00122754.jpg +Places365_test_00122767.jpg +Places365_test_00122778.jpg +Places365_test_00122793.jpg +Places365_test_00122798.jpg +Places365_test_00122804.jpg +Places365_test_00122823.jpg +Places365_test_00122852.jpg +Places365_test_00122896.jpg +Places365_test_00122899.jpg +Places365_test_00122902.jpg +Places365_test_00122910.jpg +Places365_test_00122923.jpg +Places365_test_00122927.jpg +Places365_test_00122936.jpg +Places365_test_00122943.jpg +Places365_test_00122954.jpg +Places365_test_00122958.jpg +Places365_test_00122974.jpg +Places365_test_00122975.jpg +Places365_test_00122981.jpg +Places365_test_00123000.jpg +Places365_test_00123001.jpg +Places365_test_00123004.jpg +Places365_test_00123024.jpg +Places365_test_00123051.jpg +Places365_test_00123055.jpg +Places365_test_00123064.jpg +Places365_test_00123071.jpg +Places365_test_00123074.jpg +Places365_test_00123078.jpg +Places365_test_00123102.jpg +Places365_test_00123107.jpg +Places365_test_00123125.jpg +Places365_test_00123126.jpg +Places365_test_00123138.jpg +Places365_test_00123150.jpg +Places365_test_00123154.jpg +Places365_test_00123161.jpg +Places365_test_00123191.jpg +Places365_test_00123199.jpg +Places365_test_00123201.jpg +Places365_test_00123210.jpg +Places365_test_00123224.jpg +Places365_test_00123258.jpg +Places365_test_00123261.jpg +Places365_test_00123263.jpg +Places365_test_00123264.jpg +Places365_test_00123276.jpg +Places365_test_00123279.jpg +Places365_test_00123291.jpg +Places365_test_00123306.jpg +Places365_test_00123326.jpg +Places365_test_00123333.jpg +Places365_test_00123335.jpg +Places365_test_00123336.jpg +Places365_test_00123341.jpg +Places365_test_00123344.jpg +Places365_test_00123363.jpg +Places365_test_00123407.jpg +Places365_test_00123418.jpg +Places365_test_00123422.jpg +Places365_test_00123442.jpg +Places365_test_00123446.jpg +Places365_test_00123458.jpg +Places365_test_00123483.jpg +Places365_test_00123492.jpg +Places365_test_00123496.jpg +Places365_test_00123511.jpg +Places365_test_00123532.jpg +Places365_test_00123555.jpg +Places365_test_00123559.jpg +Places365_test_00123582.jpg +Places365_test_00123592.jpg +Places365_test_00123605.jpg +Places365_test_00123608.jpg +Places365_test_00123617.jpg +Places365_test_00123621.jpg +Places365_test_00123628.jpg +Places365_test_00123650.jpg +Places365_test_00123660.jpg +Places365_test_00123677.jpg +Places365_test_00123691.jpg +Places365_test_00123697.jpg +Places365_test_00123698.jpg +Places365_test_00123700.jpg +Places365_test_00123709.jpg +Places365_test_00123717.jpg +Places365_test_00123718.jpg +Places365_test_00123731.jpg +Places365_test_00123738.jpg +Places365_test_00123741.jpg +Places365_test_00123763.jpg +Places365_test_00123782.jpg +Places365_test_00123809.jpg +Places365_test_00123811.jpg +Places365_test_00123820.jpg +Places365_test_00123828.jpg +Places365_test_00123866.jpg +Places365_test_00123882.jpg +Places365_test_00123883.jpg 
+Places365_test_00123884.jpg +Places365_test_00123930.jpg +Places365_test_00123944.jpg +Places365_test_00123950.jpg +Places365_test_00123952.jpg +Places365_test_00123975.jpg +Places365_test_00123985.jpg +Places365_test_00124000.jpg +Places365_test_00124001.jpg +Places365_test_00124012.jpg +Places365_test_00124020.jpg +Places365_test_00124048.jpg +Places365_test_00124054.jpg +Places365_test_00124058.jpg +Places365_test_00124059.jpg +Places365_test_00124068.jpg +Places365_test_00124074.jpg +Places365_test_00124078.jpg +Places365_test_00124081.jpg +Places365_test_00124093.jpg +Places365_test_00124100.jpg +Places365_test_00124106.jpg +Places365_test_00124114.jpg +Places365_test_00124116.jpg +Places365_test_00124126.jpg +Places365_test_00124148.jpg +Places365_test_00124151.jpg +Places365_test_00124153.jpg +Places365_test_00124155.jpg +Places365_test_00124182.jpg +Places365_test_00124206.jpg +Places365_test_00124250.jpg +Places365_test_00124254.jpg +Places365_test_00124257.jpg +Places365_test_00124259.jpg +Places365_test_00124260.jpg +Places365_test_00124285.jpg +Places365_test_00124290.jpg +Places365_test_00124304.jpg +Places365_test_00124305.jpg +Places365_test_00124307.jpg +Places365_test_00124313.jpg +Places365_test_00124317.jpg +Places365_test_00124326.jpg +Places365_test_00124342.jpg +Places365_test_00124354.jpg +Places365_test_00124398.jpg +Places365_test_00124428.jpg +Places365_test_00124468.jpg +Places365_test_00124489.jpg +Places365_test_00124504.jpg +Places365_test_00124511.jpg +Places365_test_00124517.jpg +Places365_test_00124523.jpg +Places365_test_00124535.jpg +Places365_test_00124550.jpg +Places365_test_00124564.jpg +Places365_test_00124570.jpg +Places365_test_00124583.jpg +Places365_test_00124585.jpg +Places365_test_00124596.jpg +Places365_test_00124602.jpg +Places365_test_00124623.jpg +Places365_test_00124627.jpg +Places365_test_00124643.jpg +Places365_test_00124651.jpg +Places365_test_00124662.jpg +Places365_test_00124665.jpg +Places365_test_00124682.jpg +Places365_test_00124698.jpg +Places365_test_00124717.jpg +Places365_test_00124723.jpg +Places365_test_00124751.jpg +Places365_test_00124753.jpg +Places365_test_00124757.jpg +Places365_test_00124763.jpg +Places365_test_00124764.jpg +Places365_test_00124779.jpg +Places365_test_00124813.jpg +Places365_test_00124815.jpg +Places365_test_00124822.jpg +Places365_test_00124838.jpg +Places365_test_00124861.jpg +Places365_test_00124862.jpg +Places365_test_00124864.jpg +Places365_test_00124877.jpg +Places365_test_00124887.jpg +Places365_test_00124893.jpg +Places365_test_00124895.jpg +Places365_test_00124909.jpg +Places365_test_00124911.jpg +Places365_test_00124935.jpg +Places365_test_00124944.jpg +Places365_test_00124959.jpg +Places365_test_00124970.jpg +Places365_test_00124989.jpg +Places365_test_00125016.jpg +Places365_test_00125037.jpg +Places365_test_00125038.jpg +Places365_test_00125054.jpg +Places365_test_00125067.jpg +Places365_test_00125086.jpg +Places365_test_00125114.jpg +Places365_test_00125120.jpg +Places365_test_00125133.jpg +Places365_test_00125171.jpg +Places365_test_00125180.jpg +Places365_test_00125181.jpg +Places365_test_00125193.jpg +Places365_test_00125196.jpg +Places365_test_00125197.jpg +Places365_test_00125205.jpg +Places365_test_00125206.jpg +Places365_test_00125208.jpg +Places365_test_00125211.jpg +Places365_test_00125222.jpg +Places365_test_00125229.jpg +Places365_test_00125243.jpg +Places365_test_00125246.jpg +Places365_test_00125257.jpg +Places365_test_00125319.jpg +Places365_test_00125320.jpg 
+Places365_test_00125321.jpg +Places365_test_00125330.jpg +Places365_test_00125356.jpg +Places365_test_00125364.jpg +Places365_test_00125387.jpg +Places365_test_00125391.jpg +Places365_test_00125392.jpg +Places365_test_00125396.jpg +Places365_test_00125437.jpg +Places365_test_00125439.jpg +Places365_test_00125459.jpg +Places365_test_00125468.jpg +Places365_test_00125469.jpg +Places365_test_00125481.jpg +Places365_test_00125497.jpg +Places365_test_00125536.jpg +Places365_test_00125538.jpg +Places365_test_00125547.jpg +Places365_test_00125553.jpg +Places365_test_00125594.jpg +Places365_test_00125596.jpg +Places365_test_00125597.jpg +Places365_test_00125603.jpg +Places365_test_00125619.jpg +Places365_test_00125620.jpg +Places365_test_00125623.jpg +Places365_test_00125639.jpg +Places365_test_00125653.jpg +Places365_test_00125658.jpg +Places365_test_00125668.jpg +Places365_test_00125676.jpg +Places365_test_00125678.jpg +Places365_test_00125688.jpg +Places365_test_00125700.jpg +Places365_test_00125716.jpg +Places365_test_00125725.jpg +Places365_test_00125739.jpg +Places365_test_00125745.jpg +Places365_test_00125746.jpg +Places365_test_00125758.jpg +Places365_test_00125768.jpg +Places365_test_00125771.jpg +Places365_test_00125772.jpg +Places365_test_00125788.jpg +Places365_test_00125789.jpg +Places365_test_00125833.jpg +Places365_test_00125837.jpg +Places365_test_00125849.jpg +Places365_test_00125866.jpg +Places365_test_00125879.jpg +Places365_test_00125888.jpg +Places365_test_00125897.jpg +Places365_test_00125906.jpg +Places365_test_00125912.jpg +Places365_test_00125913.jpg +Places365_test_00125926.jpg +Places365_test_00125928.jpg +Places365_test_00125929.jpg +Places365_test_00125935.jpg +Places365_test_00125940.jpg +Places365_test_00125947.jpg +Places365_test_00125954.jpg +Places365_test_00125968.jpg +Places365_test_00125969.jpg +Places365_test_00125970.jpg +Places365_test_00126007.jpg +Places365_test_00126015.jpg +Places365_test_00126020.jpg +Places365_test_00126023.jpg +Places365_test_00126044.jpg +Places365_test_00126090.jpg +Places365_test_00126115.jpg +Places365_test_00126118.jpg +Places365_test_00126128.jpg +Places365_test_00126131.jpg +Places365_test_00126144.jpg +Places365_test_00126179.jpg +Places365_test_00126194.jpg +Places365_test_00126196.jpg +Places365_test_00126210.jpg +Places365_test_00126256.jpg +Places365_test_00126269.jpg +Places365_test_00126270.jpg +Places365_test_00126271.jpg +Places365_test_00126280.jpg +Places365_test_00126305.jpg +Places365_test_00126307.jpg +Places365_test_00126308.jpg +Places365_test_00126324.jpg +Places365_test_00126352.jpg +Places365_test_00126371.jpg +Places365_test_00126372.jpg +Places365_test_00126388.jpg +Places365_test_00126391.jpg +Places365_test_00126405.jpg +Places365_test_00126411.jpg +Places365_test_00126412.jpg +Places365_test_00126438.jpg +Places365_test_00126471.jpg +Places365_test_00126487.jpg +Places365_test_00126489.jpg +Places365_test_00126490.jpg +Places365_test_00126493.jpg +Places365_test_00126501.jpg +Places365_test_00126511.jpg +Places365_test_00126518.jpg +Places365_test_00126520.jpg +Places365_test_00126536.jpg +Places365_test_00126545.jpg +Places365_test_00126555.jpg +Places365_test_00126560.jpg +Places365_test_00126571.jpg +Places365_test_00126608.jpg +Places365_test_00126610.jpg +Places365_test_00126631.jpg +Places365_test_00126652.jpg +Places365_test_00126655.jpg +Places365_test_00126664.jpg +Places365_test_00126669.jpg +Places365_test_00126682.jpg +Places365_test_00126684.jpg +Places365_test_00126698.jpg 
+Places365_test_00126710.jpg +Places365_test_00126716.jpg +Places365_test_00126738.jpg +Places365_test_00126751.jpg +Places365_test_00126769.jpg +Places365_test_00126772.jpg +Places365_test_00126791.jpg +Places365_test_00126811.jpg +Places365_test_00126816.jpg +Places365_test_00126817.jpg +Places365_test_00126818.jpg +Places365_test_00126826.jpg +Places365_test_00126829.jpg +Places365_test_00126836.jpg +Places365_test_00126841.jpg +Places365_test_00126842.jpg +Places365_test_00126854.jpg +Places365_test_00126873.jpg +Places365_test_00126877.jpg +Places365_test_00126912.jpg +Places365_test_00126941.jpg +Places365_test_00126942.jpg +Places365_test_00126948.jpg +Places365_test_00126969.jpg +Places365_test_00126974.jpg +Places365_test_00126990.jpg +Places365_test_00126999.jpg +Places365_test_00127019.jpg +Places365_test_00127021.jpg +Places365_test_00127034.jpg +Places365_test_00127057.jpg +Places365_test_00127081.jpg +Places365_test_00127086.jpg +Places365_test_00127090.jpg +Places365_test_00127092.jpg +Places365_test_00127113.jpg +Places365_test_00127117.jpg +Places365_test_00127119.jpg +Places365_test_00127157.jpg +Places365_test_00127166.jpg +Places365_test_00127187.jpg +Places365_test_00127199.jpg +Places365_test_00127200.jpg +Places365_test_00127202.jpg +Places365_test_00127205.jpg +Places365_test_00127233.jpg +Places365_test_00127239.jpg +Places365_test_00127243.jpg +Places365_test_00127244.jpg +Places365_test_00127264.jpg +Places365_test_00127308.jpg +Places365_test_00127321.jpg +Places365_test_00127350.jpg +Places365_test_00127358.jpg +Places365_test_00127374.jpg +Places365_test_00127382.jpg +Places365_test_00127384.jpg +Places365_test_00127389.jpg +Places365_test_00127392.jpg +Places365_test_00127415.jpg +Places365_test_00127423.jpg +Places365_test_00127433.jpg +Places365_test_00127436.jpg +Places365_test_00127450.jpg +Places365_test_00127480.jpg +Places365_test_00127511.jpg +Places365_test_00127521.jpg +Places365_test_00127525.jpg +Places365_test_00127544.jpg +Places365_test_00127546.jpg +Places365_test_00127566.jpg +Places365_test_00127580.jpg +Places365_test_00127585.jpg +Places365_test_00127619.jpg +Places365_test_00127627.jpg +Places365_test_00127633.jpg +Places365_test_00127638.jpg +Places365_test_00127652.jpg +Places365_test_00127659.jpg +Places365_test_00127685.jpg +Places365_test_00127690.jpg +Places365_test_00127698.jpg +Places365_test_00127714.jpg +Places365_test_00127717.jpg +Places365_test_00127723.jpg +Places365_test_00127729.jpg +Places365_test_00127738.jpg +Places365_test_00127744.jpg +Places365_test_00127750.jpg +Places365_test_00127753.jpg +Places365_test_00127756.jpg +Places365_test_00127761.jpg +Places365_test_00127762.jpg +Places365_test_00127806.jpg +Places365_test_00127824.jpg +Places365_test_00127826.jpg +Places365_test_00127828.jpg +Places365_test_00127843.jpg +Places365_test_00127845.jpg +Places365_test_00127847.jpg +Places365_test_00127852.jpg +Places365_test_00127858.jpg +Places365_test_00127860.jpg +Places365_test_00127865.jpg +Places365_test_00127899.jpg +Places365_test_00127902.jpg +Places365_test_00127919.jpg +Places365_test_00127923.jpg +Places365_test_00127926.jpg +Places365_test_00127935.jpg +Places365_test_00127937.jpg +Places365_test_00127947.jpg +Places365_test_00127954.jpg +Places365_test_00127960.jpg +Places365_test_00127964.jpg +Places365_test_00127973.jpg +Places365_test_00127990.jpg +Places365_test_00128004.jpg +Places365_test_00128014.jpg +Places365_test_00128018.jpg +Places365_test_00128080.jpg +Places365_test_00128081.jpg 
+Places365_test_00128082.jpg +Places365_test_00128104.jpg +Places365_test_00128110.jpg +Places365_test_00128115.jpg +Places365_test_00128149.jpg +Places365_test_00128159.jpg +Places365_test_00128166.jpg +Places365_test_00128171.jpg +Places365_test_00128191.jpg +Places365_test_00128207.jpg +Places365_test_00128209.jpg +Places365_test_00128215.jpg +Places365_test_00128219.jpg +Places365_test_00128230.jpg +Places365_test_00128269.jpg +Places365_test_00128277.jpg +Places365_test_00128279.jpg +Places365_test_00128281.jpg +Places365_test_00128289.jpg +Places365_test_00128295.jpg +Places365_test_00128299.jpg +Places365_test_00128300.jpg +Places365_test_00128324.jpg +Places365_test_00128326.jpg +Places365_test_00128337.jpg +Places365_test_00128362.jpg +Places365_test_00128402.jpg +Places365_test_00128415.jpg +Places365_test_00128422.jpg +Places365_test_00128449.jpg +Places365_test_00128460.jpg +Places365_test_00128463.jpg +Places365_test_00128475.jpg +Places365_test_00128476.jpg +Places365_test_00128496.jpg +Places365_test_00128505.jpg +Places365_test_00128516.jpg +Places365_test_00128523.jpg +Places365_test_00128565.jpg +Places365_test_00128581.jpg +Places365_test_00128582.jpg +Places365_test_00128594.jpg +Places365_test_00128596.jpg +Places365_test_00128603.jpg +Places365_test_00128608.jpg +Places365_test_00128617.jpg +Places365_test_00128628.jpg +Places365_test_00128637.jpg +Places365_test_00128652.jpg +Places365_test_00128655.jpg +Places365_test_00128661.jpg +Places365_test_00128678.jpg +Places365_test_00128681.jpg +Places365_test_00128683.jpg +Places365_test_00128684.jpg +Places365_test_00128699.jpg +Places365_test_00128749.jpg +Places365_test_00128756.jpg +Places365_test_00128762.jpg +Places365_test_00128776.jpg +Places365_test_00128785.jpg +Places365_test_00128802.jpg +Places365_test_00128804.jpg +Places365_test_00128815.jpg +Places365_test_00128817.jpg +Places365_test_00128836.jpg +Places365_test_00128842.jpg +Places365_test_00128865.jpg +Places365_test_00128868.jpg +Places365_test_00128881.jpg +Places365_test_00128900.jpg +Places365_test_00128913.jpg +Places365_test_00128918.jpg +Places365_test_00128921.jpg +Places365_test_00128937.jpg +Places365_test_00128939.jpg +Places365_test_00128964.jpg +Places365_test_00128971.jpg +Places365_test_00128983.jpg +Places365_test_00128988.jpg +Places365_test_00128993.jpg +Places365_test_00128998.jpg +Places365_test_00128999.jpg +Places365_test_00129013.jpg +Places365_test_00129030.jpg +Places365_test_00129036.jpg +Places365_test_00129058.jpg +Places365_test_00129063.jpg +Places365_test_00129092.jpg +Places365_test_00129095.jpg +Places365_test_00129099.jpg +Places365_test_00129142.jpg +Places365_test_00129145.jpg +Places365_test_00129167.jpg +Places365_test_00129181.jpg +Places365_test_00129218.jpg +Places365_test_00129252.jpg +Places365_test_00129254.jpg +Places365_test_00129258.jpg +Places365_test_00129259.jpg +Places365_test_00129267.jpg +Places365_test_00129279.jpg +Places365_test_00129287.jpg +Places365_test_00129293.jpg +Places365_test_00129310.jpg +Places365_test_00129347.jpg +Places365_test_00129379.jpg +Places365_test_00129388.jpg +Places365_test_00129398.jpg +Places365_test_00129405.jpg +Places365_test_00129408.jpg +Places365_test_00129420.jpg +Places365_test_00129426.jpg +Places365_test_00129456.jpg +Places365_test_00129458.jpg +Places365_test_00129471.jpg +Places365_test_00129475.jpg +Places365_test_00129476.jpg +Places365_test_00129492.jpg +Places365_test_00129509.jpg +Places365_test_00129514.jpg +Places365_test_00129525.jpg 
+Places365_test_00129539.jpg +Places365_test_00129543.jpg +Places365_test_00129545.jpg +Places365_test_00129556.jpg +Places365_test_00129577.jpg +Places365_test_00129589.jpg +Places365_test_00129614.jpg +Places365_test_00129619.jpg +Places365_test_00129624.jpg +Places365_test_00129626.jpg +Places365_test_00129632.jpg +Places365_test_00129665.jpg +Places365_test_00129669.jpg +Places365_test_00129672.jpg +Places365_test_00129688.jpg +Places365_test_00129692.jpg +Places365_test_00129698.jpg +Places365_test_00129715.jpg +Places365_test_00129720.jpg +Places365_test_00129729.jpg +Places365_test_00129744.jpg +Places365_test_00129760.jpg +Places365_test_00129780.jpg +Places365_test_00129782.jpg +Places365_test_00129801.jpg +Places365_test_00129804.jpg +Places365_test_00129819.jpg +Places365_test_00129843.jpg +Places365_test_00129857.jpg +Places365_test_00129868.jpg +Places365_test_00129881.jpg +Places365_test_00129907.jpg +Places365_test_00129916.jpg +Places365_test_00129917.jpg +Places365_test_00129989.jpg +Places365_test_00130002.jpg +Places365_test_00130010.jpg +Places365_test_00130011.jpg +Places365_test_00130014.jpg +Places365_test_00130027.jpg +Places365_test_00130030.jpg +Places365_test_00130042.jpg +Places365_test_00130054.jpg +Places365_test_00130079.jpg +Places365_test_00130080.jpg +Places365_test_00130092.jpg +Places365_test_00130097.jpg +Places365_test_00130106.jpg +Places365_test_00130123.jpg +Places365_test_00130124.jpg +Places365_test_00130127.jpg +Places365_test_00130138.jpg +Places365_test_00130147.jpg +Places365_test_00130158.jpg +Places365_test_00130160.jpg +Places365_test_00130187.jpg +Places365_test_00130194.jpg +Places365_test_00130202.jpg +Places365_test_00130206.jpg +Places365_test_00130227.jpg +Places365_test_00130261.jpg +Places365_test_00130262.jpg +Places365_test_00130264.jpg +Places365_test_00130271.jpg +Places365_test_00130272.jpg +Places365_test_00130276.jpg +Places365_test_00130279.jpg +Places365_test_00130288.jpg +Places365_test_00130293.jpg +Places365_test_00130299.jpg +Places365_test_00130309.jpg +Places365_test_00130311.jpg +Places365_test_00130314.jpg +Places365_test_00130318.jpg +Places365_test_00130323.jpg +Places365_test_00130334.jpg +Places365_test_00130348.jpg +Places365_test_00130353.jpg +Places365_test_00130371.jpg +Places365_test_00130385.jpg +Places365_test_00130441.jpg +Places365_test_00130456.jpg +Places365_test_00130479.jpg +Places365_test_00130497.jpg +Places365_test_00130500.jpg +Places365_test_00130501.jpg +Places365_test_00130527.jpg +Places365_test_00130531.jpg +Places365_test_00130546.jpg +Places365_test_00130561.jpg +Places365_test_00130577.jpg +Places365_test_00130579.jpg +Places365_test_00130593.jpg +Places365_test_00130598.jpg +Places365_test_00130600.jpg +Places365_test_00130605.jpg +Places365_test_00130606.jpg +Places365_test_00130616.jpg +Places365_test_00130617.jpg +Places365_test_00130634.jpg +Places365_test_00130651.jpg +Places365_test_00130655.jpg +Places365_test_00130696.jpg +Places365_test_00130699.jpg +Places365_test_00130705.jpg +Places365_test_00130715.jpg +Places365_test_00130722.jpg +Places365_test_00130724.jpg +Places365_test_00130736.jpg +Places365_test_00130747.jpg +Places365_test_00130752.jpg +Places365_test_00130775.jpg +Places365_test_00130781.jpg +Places365_test_00130797.jpg +Places365_test_00130808.jpg +Places365_test_00130815.jpg +Places365_test_00130817.jpg +Places365_test_00130827.jpg +Places365_test_00130837.jpg +Places365_test_00130845.jpg +Places365_test_00130854.jpg +Places365_test_00130870.jpg 
+Places365_test_00130871.jpg +Places365_test_00130889.jpg +Places365_test_00130899.jpg +Places365_test_00130907.jpg +Places365_test_00130910.jpg +Places365_test_00130919.jpg +Places365_test_00130935.jpg +Places365_test_00130941.jpg +Places365_test_00130942.jpg +Places365_test_00130966.jpg +Places365_test_00130967.jpg +Places365_test_00130973.jpg +Places365_test_00130975.jpg +Places365_test_00130980.jpg +Places365_test_00130989.jpg +Places365_test_00131026.jpg +Places365_test_00131030.jpg +Places365_test_00131032.jpg +Places365_test_00131056.jpg +Places365_test_00131078.jpg +Places365_test_00131080.jpg +Places365_test_00131085.jpg +Places365_test_00131097.jpg +Places365_test_00131100.jpg +Places365_test_00131105.jpg +Places365_test_00131108.jpg +Places365_test_00131115.jpg +Places365_test_00131126.jpg +Places365_test_00131133.jpg +Places365_test_00131135.jpg +Places365_test_00131156.jpg +Places365_test_00131161.jpg +Places365_test_00131179.jpg +Places365_test_00131189.jpg +Places365_test_00131208.jpg +Places365_test_00131212.jpg +Places365_test_00131213.jpg +Places365_test_00131241.jpg +Places365_test_00131251.jpg +Places365_test_00131263.jpg +Places365_test_00131268.jpg +Places365_test_00131269.jpg +Places365_test_00131299.jpg +Places365_test_00131300.jpg +Places365_test_00131306.jpg +Places365_test_00131327.jpg +Places365_test_00131345.jpg +Places365_test_00131346.jpg +Places365_test_00131369.jpg +Places365_test_00131383.jpg +Places365_test_00131389.jpg +Places365_test_00131391.jpg +Places365_test_00131397.jpg +Places365_test_00131399.jpg +Places365_test_00131410.jpg +Places365_test_00131421.jpg +Places365_test_00131460.jpg +Places365_test_00131513.jpg +Places365_test_00131526.jpg +Places365_test_00131531.jpg +Places365_test_00131546.jpg +Places365_test_00131555.jpg +Places365_test_00131574.jpg +Places365_test_00131613.jpg +Places365_test_00131615.jpg +Places365_test_00131632.jpg +Places365_test_00131642.jpg +Places365_test_00131644.jpg +Places365_test_00131655.jpg +Places365_test_00131661.jpg +Places365_test_00131664.jpg +Places365_test_00131665.jpg +Places365_test_00131680.jpg +Places365_test_00131701.jpg +Places365_test_00131717.jpg +Places365_test_00131720.jpg +Places365_test_00131721.jpg +Places365_test_00131798.jpg +Places365_test_00131800.jpg +Places365_test_00131820.jpg +Places365_test_00131827.jpg +Places365_test_00131828.jpg +Places365_test_00131852.jpg +Places365_test_00131870.jpg +Places365_test_00131907.jpg +Places365_test_00131918.jpg +Places365_test_00131928.jpg +Places365_test_00131933.jpg +Places365_test_00131935.jpg +Places365_test_00131936.jpg +Places365_test_00131950.jpg +Places365_test_00131958.jpg +Places365_test_00132022.jpg +Places365_test_00132025.jpg +Places365_test_00132039.jpg +Places365_test_00132064.jpg +Places365_test_00132076.jpg +Places365_test_00132091.jpg +Places365_test_00132094.jpg +Places365_test_00132103.jpg +Places365_test_00132114.jpg +Places365_test_00132140.jpg +Places365_test_00132143.jpg +Places365_test_00132160.jpg +Places365_test_00132161.jpg +Places365_test_00132165.jpg +Places365_test_00132187.jpg +Places365_test_00132188.jpg +Places365_test_00132194.jpg +Places365_test_00132197.jpg +Places365_test_00132253.jpg +Places365_test_00132254.jpg +Places365_test_00132262.jpg +Places365_test_00132286.jpg +Places365_test_00132287.jpg +Places365_test_00132324.jpg +Places365_test_00132354.jpg +Places365_test_00132375.jpg +Places365_test_00132383.jpg +Places365_test_00132390.jpg +Places365_test_00132405.jpg +Places365_test_00132421.jpg 
+Places365_test_00132430.jpg +Places365_test_00132442.jpg +Places365_test_00132451.jpg +Places365_test_00132484.jpg +Places365_test_00132485.jpg +Places365_test_00132510.jpg +Places365_test_00132514.jpg +Places365_test_00132522.jpg +Places365_test_00132536.jpg +Places365_test_00132541.jpg +Places365_test_00132550.jpg +Places365_test_00132579.jpg +Places365_test_00132588.jpg +Places365_test_00132595.jpg +Places365_test_00132598.jpg +Places365_test_00132624.jpg +Places365_test_00132635.jpg +Places365_test_00132640.jpg +Places365_test_00132643.jpg +Places365_test_00132656.jpg +Places365_test_00132660.jpg +Places365_test_00132669.jpg +Places365_test_00132673.jpg +Places365_test_00132677.jpg +Places365_test_00132684.jpg +Places365_test_00132685.jpg +Places365_test_00132709.jpg +Places365_test_00132728.jpg +Places365_test_00132730.jpg +Places365_test_00132731.jpg +Places365_test_00132735.jpg +Places365_test_00132754.jpg +Places365_test_00132760.jpg +Places365_test_00132768.jpg +Places365_test_00132771.jpg +Places365_test_00132785.jpg +Places365_test_00132804.jpg +Places365_test_00132829.jpg +Places365_test_00132833.jpg +Places365_test_00132834.jpg +Places365_test_00132845.jpg +Places365_test_00132847.jpg +Places365_test_00132862.jpg +Places365_test_00132874.jpg +Places365_test_00132884.jpg +Places365_test_00132908.jpg +Places365_test_00132915.jpg +Places365_test_00132917.jpg +Places365_test_00132926.jpg +Places365_test_00132932.jpg +Places365_test_00132941.jpg +Places365_test_00132952.jpg +Places365_test_00132966.jpg +Places365_test_00132969.jpg +Places365_test_00132973.jpg +Places365_test_00132986.jpg +Places365_test_00133025.jpg +Places365_test_00133034.jpg +Places365_test_00133045.jpg +Places365_test_00133053.jpg +Places365_test_00133061.jpg +Places365_test_00133067.jpg +Places365_test_00133097.jpg +Places365_test_00133123.jpg +Places365_test_00133135.jpg +Places365_test_00133141.jpg +Places365_test_00133155.jpg +Places365_test_00133165.jpg +Places365_test_00133192.jpg +Places365_test_00133208.jpg +Places365_test_00133216.jpg +Places365_test_00133219.jpg +Places365_test_00133226.jpg +Places365_test_00133231.jpg +Places365_test_00133240.jpg +Places365_test_00133255.jpg +Places365_test_00133257.jpg +Places365_test_00133261.jpg +Places365_test_00133266.jpg +Places365_test_00133272.jpg +Places365_test_00133284.jpg +Places365_test_00133317.jpg +Places365_test_00133321.jpg +Places365_test_00133325.jpg +Places365_test_00133326.jpg +Places365_test_00133341.jpg +Places365_test_00133347.jpg +Places365_test_00133353.jpg +Places365_test_00133366.jpg +Places365_test_00133369.jpg +Places365_test_00133379.jpg +Places365_test_00133380.jpg +Places365_test_00133383.jpg +Places365_test_00133395.jpg +Places365_test_00133410.jpg +Places365_test_00133439.jpg +Places365_test_00133448.jpg +Places365_test_00133483.jpg +Places365_test_00133486.jpg +Places365_test_00133487.jpg +Places365_test_00133498.jpg +Places365_test_00133509.jpg +Places365_test_00133541.jpg +Places365_test_00133544.jpg +Places365_test_00133546.jpg +Places365_test_00133550.jpg +Places365_test_00133554.jpg +Places365_test_00133565.jpg +Places365_test_00133575.jpg +Places365_test_00133578.jpg +Places365_test_00133585.jpg +Places365_test_00133589.jpg +Places365_test_00133606.jpg +Places365_test_00133616.jpg +Places365_test_00133626.jpg +Places365_test_00133634.jpg +Places365_test_00133642.jpg +Places365_test_00133645.jpg +Places365_test_00133647.jpg +Places365_test_00133654.jpg +Places365_test_00133658.jpg +Places365_test_00133680.jpg 
+Places365_test_00133696.jpg +Places365_test_00133699.jpg +Places365_test_00133714.jpg +Places365_test_00133720.jpg +Places365_test_00133733.jpg +Places365_test_00133738.jpg +Places365_test_00133755.jpg +Places365_test_00133764.jpg +Places365_test_00133765.jpg +Places365_test_00133766.jpg +Places365_test_00133784.jpg +Places365_test_00133788.jpg +Places365_test_00133816.jpg +Places365_test_00133824.jpg +Places365_test_00133838.jpg +Places365_test_00133843.jpg +Places365_test_00133888.jpg +Places365_test_00133890.jpg +Places365_test_00133896.jpg +Places365_test_00133902.jpg +Places365_test_00133904.jpg +Places365_test_00133920.jpg +Places365_test_00133933.jpg +Places365_test_00133944.jpg +Places365_test_00133947.jpg +Places365_test_00133974.jpg +Places365_test_00134000.jpg +Places365_test_00134008.jpg +Places365_test_00134010.jpg +Places365_test_00134019.jpg +Places365_test_00134024.jpg +Places365_test_00134035.jpg +Places365_test_00134040.jpg +Places365_test_00134056.jpg +Places365_test_00134082.jpg +Places365_test_00134087.jpg +Places365_test_00134105.jpg +Places365_test_00134114.jpg +Places365_test_00134129.jpg +Places365_test_00134147.jpg +Places365_test_00134152.jpg +Places365_test_00134163.jpg +Places365_test_00134179.jpg +Places365_test_00134187.jpg +Places365_test_00134201.jpg +Places365_test_00134208.jpg +Places365_test_00134248.jpg +Places365_test_00134258.jpg +Places365_test_00134294.jpg +Places365_test_00134296.jpg +Places365_test_00134297.jpg +Places365_test_00134302.jpg +Places365_test_00134311.jpg +Places365_test_00134316.jpg +Places365_test_00134319.jpg +Places365_test_00134322.jpg +Places365_test_00134349.jpg +Places365_test_00134376.jpg +Places365_test_00134400.jpg +Places365_test_00134401.jpg +Places365_test_00134428.jpg +Places365_test_00134429.jpg +Places365_test_00134441.jpg +Places365_test_00134474.jpg +Places365_test_00134485.jpg +Places365_test_00134488.jpg +Places365_test_00134490.jpg +Places365_test_00134508.jpg +Places365_test_00134514.jpg +Places365_test_00134529.jpg +Places365_test_00134545.jpg +Places365_test_00134546.jpg +Places365_test_00134563.jpg +Places365_test_00134564.jpg +Places365_test_00134572.jpg +Places365_test_00134590.jpg +Places365_test_00134600.jpg +Places365_test_00134605.jpg +Places365_test_00134616.jpg +Places365_test_00134620.jpg +Places365_test_00134633.jpg +Places365_test_00134640.jpg +Places365_test_00134642.jpg +Places365_test_00134644.jpg +Places365_test_00134653.jpg +Places365_test_00134654.jpg +Places365_test_00134666.jpg +Places365_test_00134678.jpg +Places365_test_00134709.jpg +Places365_test_00134721.jpg +Places365_test_00134768.jpg +Places365_test_00134787.jpg +Places365_test_00134788.jpg +Places365_test_00134794.jpg +Places365_test_00134796.jpg +Places365_test_00134799.jpg +Places365_test_00134820.jpg +Places365_test_00134826.jpg +Places365_test_00134842.jpg +Places365_test_00134852.jpg +Places365_test_00134853.jpg +Places365_test_00134868.jpg +Places365_test_00134883.jpg +Places365_test_00134889.jpg +Places365_test_00134920.jpg +Places365_test_00134923.jpg +Places365_test_00134926.jpg +Places365_test_00134946.jpg +Places365_test_00134987.jpg +Places365_test_00135018.jpg +Places365_test_00135019.jpg +Places365_test_00135020.jpg +Places365_test_00135023.jpg +Places365_test_00135027.jpg +Places365_test_00135033.jpg +Places365_test_00135047.jpg +Places365_test_00135050.jpg +Places365_test_00135058.jpg +Places365_test_00135065.jpg +Places365_test_00135066.jpg +Places365_test_00135068.jpg +Places365_test_00135094.jpg 
+Places365_test_00135129.jpg +Places365_test_00135130.jpg +Places365_test_00135142.jpg +Places365_test_00135149.jpg +Places365_test_00135157.jpg +Places365_test_00135168.jpg +Places365_test_00135169.jpg +Places365_test_00135183.jpg +Places365_test_00135184.jpg +Places365_test_00135209.jpg +Places365_test_00135212.jpg +Places365_test_00135223.jpg +Places365_test_00135259.jpg +Places365_test_00135261.jpg +Places365_test_00135265.jpg +Places365_test_00135293.jpg +Places365_test_00135300.jpg +Places365_test_00135304.jpg +Places365_test_00135310.jpg +Places365_test_00135314.jpg +Places365_test_00135322.jpg +Places365_test_00135327.jpg +Places365_test_00135332.jpg +Places365_test_00135359.jpg +Places365_test_00135407.jpg +Places365_test_00135410.jpg +Places365_test_00135428.jpg +Places365_test_00135440.jpg +Places365_test_00135461.jpg +Places365_test_00135477.jpg +Places365_test_00135487.jpg +Places365_test_00135488.jpg +Places365_test_00135502.jpg +Places365_test_00135505.jpg +Places365_test_00135507.jpg +Places365_test_00135521.jpg +Places365_test_00135536.jpg +Places365_test_00135549.jpg +Places365_test_00135551.jpg +Places365_test_00135557.jpg +Places365_test_00135563.jpg +Places365_test_00135565.jpg +Places365_test_00135567.jpg +Places365_test_00135568.jpg +Places365_test_00135591.jpg +Places365_test_00135593.jpg +Places365_test_00135594.jpg +Places365_test_00135617.jpg +Places365_test_00135623.jpg +Places365_test_00135651.jpg +Places365_test_00135652.jpg +Places365_test_00135660.jpg +Places365_test_00135672.jpg +Places365_test_00135682.jpg +Places365_test_00135685.jpg +Places365_test_00135688.jpg +Places365_test_00135691.jpg +Places365_test_00135701.jpg +Places365_test_00135712.jpg +Places365_test_00135728.jpg +Places365_test_00135733.jpg +Places365_test_00135748.jpg +Places365_test_00135754.jpg +Places365_test_00135756.jpg +Places365_test_00135770.jpg +Places365_test_00135778.jpg +Places365_test_00135783.jpg +Places365_test_00135786.jpg +Places365_test_00135804.jpg +Places365_test_00135840.jpg +Places365_test_00135846.jpg +Places365_test_00135857.jpg +Places365_test_00135864.jpg +Places365_test_00135893.jpg +Places365_test_00135901.jpg +Places365_test_00135908.jpg +Places365_test_00135961.jpg +Places365_test_00135971.jpg +Places365_test_00135972.jpg +Places365_test_00135989.jpg +Places365_test_00135995.jpg +Places365_test_00136012.jpg +Places365_test_00136056.jpg +Places365_test_00136069.jpg +Places365_test_00136070.jpg +Places365_test_00136095.jpg +Places365_test_00136115.jpg +Places365_test_00136123.jpg +Places365_test_00136127.jpg +Places365_test_00136157.jpg +Places365_test_00136166.jpg +Places365_test_00136176.jpg +Places365_test_00136194.jpg +Places365_test_00136200.jpg +Places365_test_00136201.jpg +Places365_test_00136202.jpg +Places365_test_00136206.jpg +Places365_test_00136214.jpg +Places365_test_00136220.jpg +Places365_test_00136223.jpg +Places365_test_00136226.jpg +Places365_test_00136239.jpg +Places365_test_00136244.jpg +Places365_test_00136250.jpg +Places365_test_00136255.jpg +Places365_test_00136259.jpg +Places365_test_00136284.jpg +Places365_test_00136287.jpg +Places365_test_00136290.jpg +Places365_test_00136297.jpg +Places365_test_00136299.jpg +Places365_test_00136318.jpg +Places365_test_00136324.jpg +Places365_test_00136330.jpg +Places365_test_00136339.jpg +Places365_test_00136348.jpg +Places365_test_00136383.jpg +Places365_test_00136385.jpg +Places365_test_00136387.jpg +Places365_test_00136392.jpg +Places365_test_00136396.jpg +Places365_test_00136406.jpg 
+Places365_test_00136408.jpg +Places365_test_00136418.jpg +Places365_test_00136421.jpg +Places365_test_00136428.jpg +Places365_test_00136470.jpg +Places365_test_00136472.jpg +Places365_test_00136496.jpg +Places365_test_00136504.jpg +Places365_test_00136512.jpg +Places365_test_00136518.jpg +Places365_test_00136530.jpg +Places365_test_00136546.jpg +Places365_test_00136568.jpg +Places365_test_00136575.jpg +Places365_test_00136608.jpg +Places365_test_00136612.jpg +Places365_test_00136621.jpg +Places365_test_00136628.jpg +Places365_test_00136631.jpg +Places365_test_00136643.jpg +Places365_test_00136658.jpg +Places365_test_00136668.jpg +Places365_test_00136672.jpg +Places365_test_00136686.jpg +Places365_test_00136710.jpg +Places365_test_00136715.jpg +Places365_test_00136716.jpg +Places365_test_00136749.jpg +Places365_test_00136791.jpg +Places365_test_00136796.jpg +Places365_test_00136803.jpg +Places365_test_00136809.jpg +Places365_test_00136825.jpg +Places365_test_00136841.jpg +Places365_test_00136857.jpg +Places365_test_00136865.jpg +Places365_test_00136884.jpg +Places365_test_00136922.jpg +Places365_test_00136933.jpg +Places365_test_00136937.jpg +Places365_test_00136963.jpg +Places365_test_00136970.jpg +Places365_test_00136972.jpg +Places365_test_00136977.jpg +Places365_test_00136993.jpg +Places365_test_00137002.jpg +Places365_test_00137033.jpg +Places365_test_00137076.jpg +Places365_test_00137112.jpg +Places365_test_00137116.jpg +Places365_test_00137129.jpg +Places365_test_00137134.jpg +Places365_test_00137140.jpg +Places365_test_00137186.jpg +Places365_test_00137188.jpg +Places365_test_00137199.jpg +Places365_test_00137211.jpg +Places365_test_00137225.jpg +Places365_test_00137236.jpg +Places365_test_00137285.jpg +Places365_test_00137303.jpg +Places365_test_00137317.jpg +Places365_test_00137321.jpg +Places365_test_00137349.jpg +Places365_test_00137398.jpg +Places365_test_00137412.jpg +Places365_test_00137414.jpg +Places365_test_00137421.jpg +Places365_test_00137424.jpg +Places365_test_00137425.jpg +Places365_test_00137432.jpg +Places365_test_00137468.jpg +Places365_test_00137473.jpg +Places365_test_00137491.jpg +Places365_test_00137493.jpg +Places365_test_00137497.jpg +Places365_test_00137498.jpg +Places365_test_00137519.jpg +Places365_test_00137528.jpg +Places365_test_00137544.jpg +Places365_test_00137548.jpg +Places365_test_00137555.jpg +Places365_test_00137563.jpg +Places365_test_00137606.jpg +Places365_test_00137610.jpg +Places365_test_00137618.jpg +Places365_test_00137628.jpg +Places365_test_00137657.jpg +Places365_test_00137663.jpg +Places365_test_00137667.jpg +Places365_test_00137689.jpg +Places365_test_00137706.jpg +Places365_test_00137720.jpg +Places365_test_00137735.jpg +Places365_test_00137759.jpg +Places365_test_00137784.jpg +Places365_test_00137798.jpg +Places365_test_00137811.jpg +Places365_test_00137813.jpg +Places365_test_00137823.jpg +Places365_test_00137824.jpg +Places365_test_00137828.jpg +Places365_test_00137847.jpg +Places365_test_00137849.jpg +Places365_test_00137866.jpg +Places365_test_00137885.jpg +Places365_test_00137892.jpg +Places365_test_00137896.jpg +Places365_test_00137905.jpg +Places365_test_00137962.jpg +Places365_test_00138003.jpg +Places365_test_00138004.jpg +Places365_test_00138025.jpg +Places365_test_00138034.jpg +Places365_test_00138086.jpg +Places365_test_00138105.jpg +Places365_test_00138113.jpg +Places365_test_00138116.jpg +Places365_test_00138139.jpg +Places365_test_00138142.jpg +Places365_test_00138149.jpg +Places365_test_00138154.jpg 
+Places365_test_00138159.jpg +Places365_test_00138168.jpg +Places365_test_00138181.jpg +Places365_test_00138207.jpg +Places365_test_00138221.jpg +Places365_test_00138229.jpg +Places365_test_00138238.jpg +Places365_test_00138244.jpg +Places365_test_00138254.jpg +Places365_test_00138265.jpg +Places365_test_00138273.jpg +Places365_test_00138274.jpg +Places365_test_00138308.jpg +Places365_test_00138318.jpg +Places365_test_00138337.jpg +Places365_test_00138346.jpg +Places365_test_00138359.jpg +Places365_test_00138380.jpg +Places365_test_00138405.jpg +Places365_test_00138416.jpg +Places365_test_00138417.jpg +Places365_test_00138418.jpg +Places365_test_00138428.jpg +Places365_test_00138489.jpg +Places365_test_00138507.jpg +Places365_test_00138515.jpg +Places365_test_00138518.jpg +Places365_test_00138530.jpg +Places365_test_00138542.jpg +Places365_test_00138558.jpg +Places365_test_00138569.jpg +Places365_test_00138570.jpg +Places365_test_00138579.jpg +Places365_test_00138606.jpg +Places365_test_00138608.jpg +Places365_test_00138620.jpg +Places365_test_00138646.jpg +Places365_test_00138653.jpg +Places365_test_00138664.jpg +Places365_test_00138668.jpg +Places365_test_00138693.jpg +Places365_test_00138717.jpg +Places365_test_00138739.jpg +Places365_test_00138740.jpg +Places365_test_00138755.jpg +Places365_test_00138774.jpg +Places365_test_00138790.jpg +Places365_test_00138793.jpg +Places365_test_00138813.jpg +Places365_test_00138831.jpg +Places365_test_00138836.jpg +Places365_test_00138839.jpg +Places365_test_00138840.jpg +Places365_test_00138869.jpg +Places365_test_00138908.jpg +Places365_test_00138925.jpg +Places365_test_00138946.jpg +Places365_test_00138948.jpg +Places365_test_00138955.jpg +Places365_test_00138963.jpg +Places365_test_00138970.jpg +Places365_test_00138979.jpg +Places365_test_00139014.jpg +Places365_test_00139017.jpg +Places365_test_00139026.jpg +Places365_test_00139030.jpg +Places365_test_00139032.jpg +Places365_test_00139037.jpg +Places365_test_00139045.jpg +Places365_test_00139075.jpg +Places365_test_00139082.jpg +Places365_test_00139088.jpg +Places365_test_00139127.jpg +Places365_test_00139131.jpg +Places365_test_00139134.jpg +Places365_test_00139150.jpg +Places365_test_00139153.jpg +Places365_test_00139163.jpg +Places365_test_00139189.jpg +Places365_test_00139190.jpg +Places365_test_00139199.jpg +Places365_test_00139205.jpg +Places365_test_00139221.jpg +Places365_test_00139237.jpg +Places365_test_00139246.jpg +Places365_test_00139259.jpg +Places365_test_00139266.jpg +Places365_test_00139282.jpg +Places365_test_00139284.jpg +Places365_test_00139287.jpg +Places365_test_00139308.jpg +Places365_test_00139322.jpg +Places365_test_00139335.jpg +Places365_test_00139350.jpg +Places365_test_00139359.jpg +Places365_test_00139374.jpg +Places365_test_00139379.jpg +Places365_test_00139380.jpg +Places365_test_00139391.jpg +Places365_test_00139411.jpg +Places365_test_00139412.jpg +Places365_test_00139421.jpg +Places365_test_00139423.jpg +Places365_test_00139437.jpg +Places365_test_00139442.jpg +Places365_test_00139454.jpg +Places365_test_00139463.jpg +Places365_test_00139465.jpg +Places365_test_00139467.jpg +Places365_test_00139477.jpg +Places365_test_00139485.jpg +Places365_test_00139498.jpg +Places365_test_00139514.jpg +Places365_test_00139532.jpg +Places365_test_00139539.jpg +Places365_test_00139541.jpg +Places365_test_00139549.jpg +Places365_test_00139560.jpg +Places365_test_00139570.jpg +Places365_test_00139577.jpg +Places365_test_00139601.jpg +Places365_test_00139640.jpg 
+Places365_test_00139644.jpg +Places365_test_00139649.jpg +Places365_test_00139651.jpg +Places365_test_00139684.jpg +Places365_test_00139685.jpg +Places365_test_00139690.jpg +Places365_test_00139691.jpg +Places365_test_00139727.jpg +Places365_test_00139730.jpg +Places365_test_00139731.jpg +Places365_test_00139743.jpg +Places365_test_00139747.jpg +Places365_test_00139766.jpg +Places365_test_00139805.jpg +Places365_test_00139809.jpg +Places365_test_00139813.jpg +Places365_test_00139816.jpg +Places365_test_00139833.jpg +Places365_test_00139834.jpg +Places365_test_00139839.jpg +Places365_test_00139856.jpg +Places365_test_00139859.jpg +Places365_test_00139865.jpg +Places365_test_00139873.jpg +Places365_test_00139887.jpg +Places365_test_00139898.jpg +Places365_test_00139915.jpg +Places365_test_00139917.jpg +Places365_test_00139919.jpg +Places365_test_00139923.jpg +Places365_test_00139931.jpg +Places365_test_00139933.jpg +Places365_test_00139943.jpg +Places365_test_00139960.jpg +Places365_test_00139963.jpg +Places365_test_00139975.jpg +Places365_test_00140003.jpg +Places365_test_00140010.jpg +Places365_test_00140017.jpg +Places365_test_00140024.jpg +Places365_test_00140044.jpg +Places365_test_00140051.jpg +Places365_test_00140062.jpg +Places365_test_00140072.jpg +Places365_test_00140090.jpg +Places365_test_00140093.jpg +Places365_test_00140095.jpg +Places365_test_00140104.jpg +Places365_test_00140107.jpg +Places365_test_00140114.jpg +Places365_test_00140128.jpg +Places365_test_00140160.jpg +Places365_test_00140164.jpg +Places365_test_00140171.jpg +Places365_test_00140182.jpg +Places365_test_00140204.jpg +Places365_test_00140212.jpg +Places365_test_00140223.jpg +Places365_test_00140234.jpg +Places365_test_00140250.jpg +Places365_test_00140252.jpg +Places365_test_00140266.jpg +Places365_test_00140276.jpg +Places365_test_00140300.jpg +Places365_test_00140313.jpg +Places365_test_00140314.jpg +Places365_test_00140316.jpg +Places365_test_00140345.jpg +Places365_test_00140386.jpg +Places365_test_00140392.jpg +Places365_test_00140394.jpg +Places365_test_00140410.jpg +Places365_test_00140422.jpg +Places365_test_00140441.jpg +Places365_test_00140451.jpg +Places365_test_00140458.jpg +Places365_test_00140471.jpg +Places365_test_00140476.jpg +Places365_test_00140516.jpg +Places365_test_00140534.jpg +Places365_test_00140543.jpg +Places365_test_00140545.jpg +Places365_test_00140547.jpg +Places365_test_00140554.jpg +Places365_test_00140559.jpg +Places365_test_00140564.jpg +Places365_test_00140568.jpg +Places365_test_00140569.jpg +Places365_test_00140583.jpg +Places365_test_00140589.jpg +Places365_test_00140592.jpg +Places365_test_00140595.jpg +Places365_test_00140599.jpg +Places365_test_00140606.jpg +Places365_test_00140623.jpg +Places365_test_00140639.jpg +Places365_test_00140646.jpg +Places365_test_00140647.jpg +Places365_test_00140654.jpg +Places365_test_00140655.jpg +Places365_test_00140656.jpg +Places365_test_00140659.jpg +Places365_test_00140667.jpg +Places365_test_00140670.jpg +Places365_test_00140675.jpg +Places365_test_00140685.jpg +Places365_test_00140691.jpg +Places365_test_00140695.jpg +Places365_test_00140706.jpg +Places365_test_00140709.jpg +Places365_test_00140726.jpg +Places365_test_00140734.jpg +Places365_test_00140742.jpg +Places365_test_00140745.jpg +Places365_test_00140790.jpg +Places365_test_00140810.jpg +Places365_test_00140813.jpg +Places365_test_00140820.jpg +Places365_test_00140822.jpg +Places365_test_00140830.jpg +Places365_test_00140849.jpg +Places365_test_00140860.jpg 
+Places365_test_00140871.jpg +Places365_test_00140875.jpg +Places365_test_00140894.jpg +Places365_test_00140909.jpg +Places365_test_00140916.jpg +Places365_test_00140929.jpg +Places365_test_00140933.jpg +Places365_test_00140954.jpg +Places365_test_00140956.jpg +Places365_test_00140960.jpg +Places365_test_00140976.jpg +Places365_test_00140988.jpg +Places365_test_00140990.jpg +Places365_test_00140991.jpg +Places365_test_00140994.jpg +Places365_test_00141002.jpg +Places365_test_00141019.jpg +Places365_test_00141024.jpg +Places365_test_00141043.jpg +Places365_test_00141046.jpg +Places365_test_00141048.jpg +Places365_test_00141053.jpg +Places365_test_00141057.jpg +Places365_test_00141058.jpg +Places365_test_00141072.jpg +Places365_test_00141083.jpg +Places365_test_00141100.jpg +Places365_test_00141126.jpg +Places365_test_00141129.jpg +Places365_test_00141147.jpg +Places365_test_00141158.jpg +Places365_test_00141183.jpg +Places365_test_00141184.jpg +Places365_test_00141228.jpg +Places365_test_00141253.jpg +Places365_test_00141270.jpg +Places365_test_00141273.jpg +Places365_test_00141275.jpg +Places365_test_00141281.jpg +Places365_test_00141296.jpg +Places365_test_00141301.jpg +Places365_test_00141302.jpg +Places365_test_00141305.jpg +Places365_test_00141340.jpg +Places365_test_00141350.jpg +Places365_test_00141353.jpg +Places365_test_00141362.jpg +Places365_test_00141364.jpg +Places365_test_00141365.jpg +Places365_test_00141370.jpg +Places365_test_00141373.jpg +Places365_test_00141379.jpg +Places365_test_00141400.jpg +Places365_test_00141402.jpg +Places365_test_00141411.jpg +Places365_test_00141414.jpg +Places365_test_00141420.jpg +Places365_test_00141421.jpg +Places365_test_00141434.jpg +Places365_test_00141445.jpg +Places365_test_00141454.jpg +Places365_test_00141458.jpg +Places365_test_00141461.jpg +Places365_test_00141463.jpg +Places365_test_00141472.jpg +Places365_test_00141492.jpg +Places365_test_00141504.jpg +Places365_test_00141511.jpg +Places365_test_00141521.jpg +Places365_test_00141524.jpg +Places365_test_00141525.jpg +Places365_test_00141537.jpg +Places365_test_00141541.jpg +Places365_test_00141551.jpg +Places365_test_00141568.jpg +Places365_test_00141571.jpg +Places365_test_00141581.jpg +Places365_test_00141583.jpg +Places365_test_00141589.jpg +Places365_test_00141611.jpg +Places365_test_00141620.jpg +Places365_test_00141631.jpg +Places365_test_00141645.jpg +Places365_test_00141657.jpg +Places365_test_00141663.jpg +Places365_test_00141678.jpg +Places365_test_00141689.jpg +Places365_test_00141694.jpg +Places365_test_00141696.jpg +Places365_test_00141700.jpg +Places365_test_00141701.jpg +Places365_test_00141704.jpg +Places365_test_00141706.jpg +Places365_test_00141712.jpg +Places365_test_00141749.jpg +Places365_test_00141758.jpg +Places365_test_00141759.jpg +Places365_test_00141800.jpg +Places365_test_00141822.jpg +Places365_test_00141833.jpg +Places365_test_00141837.jpg +Places365_test_00141841.jpg +Places365_test_00141855.jpg +Places365_test_00141859.jpg +Places365_test_00141878.jpg +Places365_test_00141880.jpg +Places365_test_00141890.jpg +Places365_test_00141896.jpg +Places365_test_00141940.jpg +Places365_test_00141942.jpg +Places365_test_00141959.jpg +Places365_test_00141972.jpg +Places365_test_00141995.jpg +Places365_test_00142021.jpg +Places365_test_00142024.jpg +Places365_test_00142069.jpg +Places365_test_00142072.jpg +Places365_test_00142095.jpg +Places365_test_00142097.jpg +Places365_test_00142108.jpg +Places365_test_00142110.jpg +Places365_test_00142111.jpg 
+Places365_test_00142128.jpg +Places365_test_00142156.jpg +Places365_test_00142176.jpg +Places365_test_00142179.jpg +Places365_test_00142186.jpg +Places365_test_00142189.jpg +Places365_test_00142192.jpg +Places365_test_00142193.jpg +Places365_test_00142199.jpg +Places365_test_00142205.jpg +Places365_test_00142217.jpg +Places365_test_00142224.jpg +Places365_test_00142228.jpg +Places365_test_00142237.jpg +Places365_test_00142247.jpg +Places365_test_00142255.jpg +Places365_test_00142273.jpg +Places365_test_00142276.jpg +Places365_test_00142304.jpg +Places365_test_00142315.jpg +Places365_test_00142323.jpg +Places365_test_00142330.jpg +Places365_test_00142351.jpg +Places365_test_00142353.jpg +Places365_test_00142357.jpg +Places365_test_00142360.jpg +Places365_test_00142368.jpg +Places365_test_00142378.jpg +Places365_test_00142389.jpg +Places365_test_00142392.jpg +Places365_test_00142396.jpg +Places365_test_00142407.jpg +Places365_test_00142410.jpg +Places365_test_00142426.jpg +Places365_test_00142429.jpg +Places365_test_00142431.jpg +Places365_test_00142444.jpg +Places365_test_00142457.jpg +Places365_test_00142473.jpg +Places365_test_00142486.jpg +Places365_test_00142491.jpg +Places365_test_00142493.jpg +Places365_test_00142494.jpg +Places365_test_00142509.jpg +Places365_test_00142517.jpg +Places365_test_00142520.jpg +Places365_test_00142542.jpg +Places365_test_00142543.jpg +Places365_test_00142547.jpg +Places365_test_00142557.jpg +Places365_test_00142568.jpg +Places365_test_00142570.jpg +Places365_test_00142580.jpg +Places365_test_00142586.jpg +Places365_test_00142592.jpg +Places365_test_00142595.jpg +Places365_test_00142621.jpg +Places365_test_00142644.jpg +Places365_test_00142646.jpg +Places365_test_00142648.jpg +Places365_test_00142654.jpg +Places365_test_00142667.jpg +Places365_test_00142680.jpg +Places365_test_00142681.jpg +Places365_test_00142691.jpg +Places365_test_00142694.jpg +Places365_test_00142696.jpg +Places365_test_00142700.jpg +Places365_test_00142711.jpg +Places365_test_00142722.jpg +Places365_test_00142732.jpg +Places365_test_00142737.jpg +Places365_test_00142738.jpg +Places365_test_00142741.jpg +Places365_test_00142753.jpg +Places365_test_00142770.jpg +Places365_test_00142774.jpg +Places365_test_00142780.jpg +Places365_test_00142795.jpg +Places365_test_00142806.jpg +Places365_test_00142807.jpg +Places365_test_00142823.jpg +Places365_test_00142832.jpg +Places365_test_00142841.jpg +Places365_test_00142861.jpg +Places365_test_00142878.jpg +Places365_test_00142887.jpg +Places365_test_00142888.jpg +Places365_test_00142889.jpg +Places365_test_00142895.jpg +Places365_test_00142920.jpg +Places365_test_00142929.jpg +Places365_test_00142933.jpg +Places365_test_00142946.jpg +Places365_test_00142967.jpg +Places365_test_00143012.jpg +Places365_test_00143018.jpg +Places365_test_00143020.jpg +Places365_test_00143022.jpg +Places365_test_00143023.jpg +Places365_test_00143024.jpg +Places365_test_00143032.jpg +Places365_test_00143060.jpg +Places365_test_00143081.jpg +Places365_test_00143130.jpg +Places365_test_00143151.jpg +Places365_test_00143152.jpg +Places365_test_00143175.jpg +Places365_test_00143189.jpg +Places365_test_00143194.jpg +Places365_test_00143195.jpg +Places365_test_00143202.jpg +Places365_test_00143211.jpg +Places365_test_00143214.jpg +Places365_test_00143216.jpg +Places365_test_00143217.jpg +Places365_test_00143218.jpg +Places365_test_00143258.jpg +Places365_test_00143266.jpg +Places365_test_00143278.jpg +Places365_test_00143288.jpg +Places365_test_00143292.jpg 
+Places365_test_00143302.jpg +Places365_test_00143303.jpg +Places365_test_00143320.jpg +Places365_test_00143327.jpg +Places365_test_00143340.jpg +Places365_test_00143359.jpg +Places365_test_00143372.jpg +Places365_test_00143384.jpg +Places365_test_00143388.jpg +Places365_test_00143398.jpg +Places365_test_00143402.jpg +Places365_test_00143406.jpg +Places365_test_00143436.jpg +Places365_test_00143440.jpg +Places365_test_00143447.jpg +Places365_test_00143468.jpg +Places365_test_00143475.jpg +Places365_test_00143497.jpg +Places365_test_00143506.jpg +Places365_test_00143508.jpg +Places365_test_00143547.jpg +Places365_test_00143552.jpg +Places365_test_00143560.jpg +Places365_test_00143562.jpg +Places365_test_00143573.jpg +Places365_test_00143604.jpg +Places365_test_00143679.jpg +Places365_test_00143715.jpg +Places365_test_00143720.jpg +Places365_test_00143734.jpg +Places365_test_00143739.jpg +Places365_test_00143753.jpg +Places365_test_00143805.jpg +Places365_test_00143816.jpg +Places365_test_00143829.jpg +Places365_test_00143839.jpg +Places365_test_00143840.jpg +Places365_test_00143845.jpg +Places365_test_00143846.jpg +Places365_test_00143859.jpg +Places365_test_00143866.jpg +Places365_test_00143868.jpg +Places365_test_00143882.jpg +Places365_test_00143892.jpg +Places365_test_00143899.jpg +Places365_test_00143916.jpg +Places365_test_00143921.jpg +Places365_test_00143922.jpg +Places365_test_00143927.jpg +Places365_test_00143947.jpg +Places365_test_00143983.jpg +Places365_test_00143986.jpg +Places365_test_00143989.jpg +Places365_test_00143993.jpg +Places365_test_00144002.jpg +Places365_test_00144012.jpg +Places365_test_00144018.jpg +Places365_test_00144057.jpg +Places365_test_00144079.jpg +Places365_test_00144100.jpg +Places365_test_00144133.jpg +Places365_test_00144138.jpg +Places365_test_00144140.jpg +Places365_test_00144143.jpg +Places365_test_00144166.jpg +Places365_test_00144179.jpg +Places365_test_00144187.jpg +Places365_test_00144200.jpg +Places365_test_00144211.jpg +Places365_test_00144216.jpg +Places365_test_00144217.jpg +Places365_test_00144221.jpg +Places365_test_00144239.jpg +Places365_test_00144245.jpg +Places365_test_00144257.jpg +Places365_test_00144261.jpg +Places365_test_00144263.jpg +Places365_test_00144273.jpg +Places365_test_00144279.jpg +Places365_test_00144295.jpg +Places365_test_00144302.jpg +Places365_test_00144325.jpg +Places365_test_00144333.jpg +Places365_test_00144339.jpg +Places365_test_00144362.jpg +Places365_test_00144368.jpg +Places365_test_00144379.jpg +Places365_test_00144389.jpg +Places365_test_00144393.jpg +Places365_test_00144415.jpg +Places365_test_00144425.jpg +Places365_test_00144432.jpg +Places365_test_00144438.jpg +Places365_test_00144440.jpg +Places365_test_00144462.jpg +Places365_test_00144477.jpg +Places365_test_00144492.jpg +Places365_test_00144495.jpg +Places365_test_00144499.jpg +Places365_test_00144503.jpg +Places365_test_00144507.jpg +Places365_test_00144520.jpg +Places365_test_00144522.jpg +Places365_test_00144536.jpg +Places365_test_00144545.jpg +Places365_test_00144547.jpg +Places365_test_00144556.jpg +Places365_test_00144560.jpg +Places365_test_00144562.jpg +Places365_test_00144563.jpg +Places365_test_00144573.jpg +Places365_test_00144580.jpg +Places365_test_00144582.jpg +Places365_test_00144595.jpg +Places365_test_00144621.jpg +Places365_test_00144641.jpg +Places365_test_00144663.jpg +Places365_test_00144670.jpg +Places365_test_00144679.jpg +Places365_test_00144681.jpg +Places365_test_00144696.jpg +Places365_test_00144701.jpg 
+Places365_test_00144709.jpg +Places365_test_00144714.jpg +Places365_test_00144720.jpg +Places365_test_00144726.jpg +Places365_test_00144737.jpg +Places365_test_00144744.jpg +Places365_test_00144758.jpg +Places365_test_00144762.jpg +Places365_test_00144769.jpg +Places365_test_00144771.jpg +Places365_test_00144782.jpg +Places365_test_00144788.jpg +Places365_test_00144806.jpg +Places365_test_00144811.jpg +Places365_test_00144813.jpg +Places365_test_00144834.jpg +Places365_test_00144842.jpg +Places365_test_00144867.jpg +Places365_test_00144870.jpg +Places365_test_00144871.jpg +Places365_test_00144877.jpg +Places365_test_00144901.jpg +Places365_test_00144903.jpg +Places365_test_00144915.jpg +Places365_test_00144931.jpg +Places365_test_00144957.jpg +Places365_test_00144965.jpg +Places365_test_00144983.jpg +Places365_test_00144985.jpg +Places365_test_00144988.jpg +Places365_test_00144993.jpg +Places365_test_00145047.jpg +Places365_test_00145051.jpg +Places365_test_00145052.jpg +Places365_test_00145061.jpg +Places365_test_00145071.jpg +Places365_test_00145079.jpg +Places365_test_00145082.jpg +Places365_test_00145085.jpg +Places365_test_00145087.jpg +Places365_test_00145100.jpg +Places365_test_00145102.jpg +Places365_test_00145153.jpg +Places365_test_00145162.jpg +Places365_test_00145180.jpg +Places365_test_00145186.jpg +Places365_test_00145195.jpg +Places365_test_00145200.jpg +Places365_test_00145203.jpg +Places365_test_00145222.jpg +Places365_test_00145273.jpg +Places365_test_00145299.jpg +Places365_test_00145302.jpg +Places365_test_00145319.jpg +Places365_test_00145327.jpg +Places365_test_00145338.jpg +Places365_test_00145348.jpg +Places365_test_00145349.jpg +Places365_test_00145357.jpg +Places365_test_00145360.jpg +Places365_test_00145383.jpg +Places365_test_00145403.jpg +Places365_test_00145428.jpg +Places365_test_00145430.jpg +Places365_test_00145432.jpg +Places365_test_00145445.jpg +Places365_test_00145446.jpg +Places365_test_00145447.jpg +Places365_test_00145455.jpg +Places365_test_00145458.jpg +Places365_test_00145459.jpg +Places365_test_00145475.jpg +Places365_test_00145476.jpg +Places365_test_00145493.jpg +Places365_test_00145526.jpg +Places365_test_00145547.jpg +Places365_test_00145552.jpg +Places365_test_00145558.jpg +Places365_test_00145582.jpg +Places365_test_00145594.jpg +Places365_test_00145609.jpg +Places365_test_00145610.jpg +Places365_test_00145616.jpg +Places365_test_00145646.jpg +Places365_test_00145655.jpg +Places365_test_00145683.jpg +Places365_test_00145690.jpg +Places365_test_00145703.jpg +Places365_test_00145705.jpg +Places365_test_00145720.jpg +Places365_test_00145723.jpg +Places365_test_00145770.jpg +Places365_test_00145778.jpg +Places365_test_00145792.jpg +Places365_test_00145813.jpg +Places365_test_00145826.jpg +Places365_test_00145838.jpg +Places365_test_00145849.jpg +Places365_test_00145852.jpg +Places365_test_00145858.jpg +Places365_test_00145863.jpg +Places365_test_00145874.jpg +Places365_test_00145891.jpg +Places365_test_00145894.jpg +Places365_test_00145913.jpg +Places365_test_00145921.jpg +Places365_test_00145922.jpg +Places365_test_00145935.jpg +Places365_test_00145948.jpg +Places365_test_00145984.jpg +Places365_test_00145989.jpg +Places365_test_00145995.jpg +Places365_test_00146007.jpg +Places365_test_00146025.jpg +Places365_test_00146031.jpg +Places365_test_00146038.jpg +Places365_test_00146041.jpg +Places365_test_00146050.jpg +Places365_test_00146075.jpg +Places365_test_00146085.jpg +Places365_test_00146089.jpg +Places365_test_00146090.jpg 
+Places365_test_00146099.jpg +Places365_test_00146104.jpg +Places365_test_00146108.jpg +Places365_test_00146110.jpg +Places365_test_00146111.jpg +Places365_test_00146128.jpg +Places365_test_00146130.jpg +Places365_test_00146150.jpg +Places365_test_00146180.jpg +Places365_test_00146184.jpg +Places365_test_00146208.jpg +Places365_test_00146223.jpg +Places365_test_00146239.jpg +Places365_test_00146248.jpg +Places365_test_00146256.jpg +Places365_test_00146259.jpg +Places365_test_00146260.jpg +Places365_test_00146268.jpg +Places365_test_00146273.jpg +Places365_test_00146303.jpg +Places365_test_00146314.jpg +Places365_test_00146325.jpg +Places365_test_00146327.jpg +Places365_test_00146334.jpg +Places365_test_00146346.jpg +Places365_test_00146352.jpg +Places365_test_00146362.jpg +Places365_test_00146380.jpg +Places365_test_00146381.jpg +Places365_test_00146383.jpg +Places365_test_00146388.jpg +Places365_test_00146390.jpg +Places365_test_00146393.jpg +Places365_test_00146400.jpg +Places365_test_00146419.jpg +Places365_test_00146438.jpg +Places365_test_00146459.jpg +Places365_test_00146460.jpg +Places365_test_00146469.jpg +Places365_test_00146488.jpg +Places365_test_00146508.jpg +Places365_test_00146542.jpg +Places365_test_00146547.jpg +Places365_test_00146548.jpg +Places365_test_00146562.jpg +Places365_test_00146566.jpg +Places365_test_00146569.jpg +Places365_test_00146578.jpg +Places365_test_00146585.jpg +Places365_test_00146591.jpg +Places365_test_00146596.jpg +Places365_test_00146608.jpg +Places365_test_00146614.jpg +Places365_test_00146620.jpg +Places365_test_00146626.jpg +Places365_test_00146645.jpg +Places365_test_00146657.jpg +Places365_test_00146669.jpg +Places365_test_00146673.jpg +Places365_test_00146675.jpg +Places365_test_00146677.jpg +Places365_test_00146681.jpg +Places365_test_00146708.jpg +Places365_test_00146720.jpg +Places365_test_00146721.jpg +Places365_test_00146723.jpg +Places365_test_00146724.jpg +Places365_test_00146727.jpg +Places365_test_00146735.jpg +Places365_test_00146772.jpg +Places365_test_00146773.jpg +Places365_test_00146782.jpg +Places365_test_00146786.jpg +Places365_test_00146794.jpg +Places365_test_00146815.jpg +Places365_test_00146824.jpg +Places365_test_00146833.jpg +Places365_test_00146835.jpg +Places365_test_00146836.jpg +Places365_test_00146846.jpg +Places365_test_00146859.jpg +Places365_test_00146867.jpg +Places365_test_00146872.jpg +Places365_test_00146880.jpg +Places365_test_00146906.jpg +Places365_test_00146927.jpg +Places365_test_00146930.jpg +Places365_test_00146935.jpg +Places365_test_00146938.jpg +Places365_test_00146948.jpg +Places365_test_00146949.jpg +Places365_test_00146955.jpg +Places365_test_00146988.jpg +Places365_test_00146994.jpg +Places365_test_00147005.jpg +Places365_test_00147035.jpg +Places365_test_00147037.jpg +Places365_test_00147039.jpg +Places365_test_00147062.jpg +Places365_test_00147076.jpg +Places365_test_00147089.jpg +Places365_test_00147091.jpg +Places365_test_00147110.jpg +Places365_test_00147111.jpg +Places365_test_00147113.jpg +Places365_test_00147131.jpg +Places365_test_00147132.jpg +Places365_test_00147142.jpg +Places365_test_00147146.jpg +Places365_test_00147152.jpg +Places365_test_00147158.jpg +Places365_test_00147159.jpg +Places365_test_00147164.jpg +Places365_test_00147167.jpg +Places365_test_00147187.jpg +Places365_test_00147194.jpg +Places365_test_00147202.jpg +Places365_test_00147206.jpg +Places365_test_00147223.jpg +Places365_test_00147250.jpg +Places365_test_00147257.jpg +Places365_test_00147258.jpg 
+Places365_test_00147278.jpg +Places365_test_00147283.jpg +Places365_test_00147290.jpg +Places365_test_00147291.jpg +Places365_test_00147292.jpg +Places365_test_00147300.jpg +Places365_test_00147309.jpg +Places365_test_00147324.jpg +Places365_test_00147333.jpg +Places365_test_00147337.jpg +Places365_test_00147342.jpg +Places365_test_00147354.jpg +Places365_test_00147356.jpg +Places365_test_00147382.jpg +Places365_test_00147397.jpg +Places365_test_00147400.jpg +Places365_test_00147403.jpg +Places365_test_00147406.jpg +Places365_test_00147420.jpg +Places365_test_00147423.jpg +Places365_test_00147472.jpg +Places365_test_00147475.jpg +Places365_test_00147489.jpg +Places365_test_00147491.jpg +Places365_test_00147494.jpg +Places365_test_00147530.jpg +Places365_test_00147542.jpg +Places365_test_00147544.jpg +Places365_test_00147546.jpg +Places365_test_00147551.jpg +Places365_test_00147555.jpg +Places365_test_00147569.jpg +Places365_test_00147571.jpg +Places365_test_00147575.jpg +Places365_test_00147600.jpg +Places365_test_00147602.jpg +Places365_test_00147630.jpg +Places365_test_00147634.jpg +Places365_test_00147647.jpg +Places365_test_00147655.jpg +Places365_test_00147688.jpg +Places365_test_00147693.jpg +Places365_test_00147707.jpg +Places365_test_00147714.jpg +Places365_test_00147716.jpg +Places365_test_00147732.jpg +Places365_test_00147738.jpg +Places365_test_00147745.jpg +Places365_test_00147753.jpg +Places365_test_00147758.jpg +Places365_test_00147759.jpg +Places365_test_00147768.jpg +Places365_test_00147775.jpg +Places365_test_00147794.jpg +Places365_test_00147803.jpg +Places365_test_00147809.jpg +Places365_test_00147814.jpg +Places365_test_00147848.jpg +Places365_test_00147849.jpg +Places365_test_00147857.jpg +Places365_test_00147876.jpg +Places365_test_00147878.jpg +Places365_test_00147884.jpg +Places365_test_00147914.jpg +Places365_test_00147929.jpg +Places365_test_00147938.jpg +Places365_test_00147951.jpg +Places365_test_00147971.jpg +Places365_test_00147975.jpg +Places365_test_00147977.jpg +Places365_test_00148007.jpg +Places365_test_00148011.jpg +Places365_test_00148013.jpg +Places365_test_00148037.jpg +Places365_test_00148064.jpg +Places365_test_00148071.jpg +Places365_test_00148088.jpg +Places365_test_00148100.jpg +Places365_test_00148101.jpg +Places365_test_00148106.jpg +Places365_test_00148123.jpg +Places365_test_00148128.jpg +Places365_test_00148131.jpg +Places365_test_00148147.jpg +Places365_test_00148149.jpg +Places365_test_00148157.jpg +Places365_test_00148188.jpg +Places365_test_00148199.jpg +Places365_test_00148220.jpg +Places365_test_00148225.jpg +Places365_test_00148229.jpg +Places365_test_00148248.jpg +Places365_test_00148252.jpg +Places365_test_00148263.jpg +Places365_test_00148265.jpg +Places365_test_00148269.jpg +Places365_test_00148286.jpg +Places365_test_00148287.jpg +Places365_test_00148304.jpg +Places365_test_00148306.jpg +Places365_test_00148313.jpg +Places365_test_00148322.jpg +Places365_test_00148334.jpg +Places365_test_00148343.jpg +Places365_test_00148355.jpg +Places365_test_00148356.jpg +Places365_test_00148358.jpg +Places365_test_00148360.jpg +Places365_test_00148389.jpg +Places365_test_00148418.jpg +Places365_test_00148433.jpg +Places365_test_00148439.jpg +Places365_test_00148445.jpg +Places365_test_00148446.jpg +Places365_test_00148452.jpg +Places365_test_00148473.jpg +Places365_test_00148475.jpg +Places365_test_00148478.jpg +Places365_test_00148481.jpg +Places365_test_00148482.jpg +Places365_test_00148484.jpg +Places365_test_00148507.jpg 
+Places365_test_00148522.jpg +Places365_test_00148523.jpg +Places365_test_00148529.jpg +Places365_test_00148531.jpg +Places365_test_00148546.jpg +Places365_test_00148570.jpg +Places365_test_00148580.jpg +Places365_test_00148582.jpg +Places365_test_00148587.jpg +Places365_test_00148589.jpg +Places365_test_00148593.jpg +Places365_test_00148596.jpg +Places365_test_00148597.jpg +Places365_test_00148598.jpg +Places365_test_00148611.jpg +Places365_test_00148616.jpg +Places365_test_00148632.jpg +Places365_test_00148642.jpg +Places365_test_00148644.jpg +Places365_test_00148680.jpg +Places365_test_00148686.jpg +Places365_test_00148703.jpg +Places365_test_00148710.jpg +Places365_test_00148719.jpg +Places365_test_00148728.jpg +Places365_test_00148738.jpg +Places365_test_00148754.jpg +Places365_test_00148760.jpg +Places365_test_00148762.jpg +Places365_test_00148772.jpg +Places365_test_00148796.jpg +Places365_test_00148819.jpg +Places365_test_00148833.jpg +Places365_test_00148834.jpg +Places365_test_00148875.jpg +Places365_test_00148884.jpg +Places365_test_00148887.jpg +Places365_test_00148949.jpg +Places365_test_00148956.jpg +Places365_test_00148971.jpg +Places365_test_00148981.jpg +Places365_test_00148985.jpg +Places365_test_00149000.jpg +Places365_test_00149026.jpg +Places365_test_00149032.jpg +Places365_test_00149033.jpg +Places365_test_00149052.jpg +Places365_test_00149058.jpg +Places365_test_00149071.jpg +Places365_test_00149077.jpg +Places365_test_00149123.jpg +Places365_test_00149134.jpg +Places365_test_00149137.jpg +Places365_test_00149155.jpg +Places365_test_00149165.jpg +Places365_test_00149183.jpg +Places365_test_00149204.jpg +Places365_test_00149207.jpg +Places365_test_00149219.jpg +Places365_test_00149230.jpg +Places365_test_00149236.jpg +Places365_test_00149237.jpg +Places365_test_00149253.jpg +Places365_test_00149273.jpg +Places365_test_00149278.jpg +Places365_test_00149285.jpg +Places365_test_00149289.jpg +Places365_test_00149290.jpg +Places365_test_00149310.jpg +Places365_test_00149314.jpg +Places365_test_00149321.jpg +Places365_test_00149325.jpg +Places365_test_00149329.jpg +Places365_test_00149343.jpg +Places365_test_00149347.jpg +Places365_test_00149361.jpg +Places365_test_00149367.jpg +Places365_test_00149383.jpg +Places365_test_00149403.jpg +Places365_test_00149413.jpg +Places365_test_00149420.jpg +Places365_test_00149424.jpg +Places365_test_00149427.jpg +Places365_test_00149436.jpg +Places365_test_00149450.jpg +Places365_test_00149458.jpg +Places365_test_00149461.jpg +Places365_test_00149470.jpg +Places365_test_00149488.jpg +Places365_test_00149494.jpg +Places365_test_00149523.jpg +Places365_test_00149528.jpg +Places365_test_00149541.jpg +Places365_test_00149557.jpg +Places365_test_00149561.jpg +Places365_test_00149576.jpg +Places365_test_00149582.jpg +Places365_test_00149587.jpg +Places365_test_00149598.jpg +Places365_test_00149602.jpg +Places365_test_00149612.jpg +Places365_test_00149642.jpg +Places365_test_00149657.jpg +Places365_test_00149662.jpg +Places365_test_00149669.jpg +Places365_test_00149672.jpg +Places365_test_00149674.jpg +Places365_test_00149686.jpg +Places365_test_00149687.jpg +Places365_test_00149690.jpg +Places365_test_00149715.jpg +Places365_test_00149723.jpg +Places365_test_00149732.jpg +Places365_test_00149744.jpg +Places365_test_00149754.jpg +Places365_test_00149763.jpg +Places365_test_00149769.jpg +Places365_test_00149774.jpg +Places365_test_00149775.jpg +Places365_test_00149787.jpg +Places365_test_00149799.jpg +Places365_test_00149802.jpg 
+Places365_test_00149822.jpg +Places365_test_00149833.jpg +Places365_test_00149841.jpg +Places365_test_00149845.jpg +Places365_test_00149853.jpg +Places365_test_00149882.jpg +Places365_test_00149884.jpg +Places365_test_00149887.jpg +Places365_test_00149894.jpg +Places365_test_00149896.jpg +Places365_test_00149904.jpg +Places365_test_00149913.jpg +Places365_test_00149914.jpg +Places365_test_00149937.jpg +Places365_test_00149955.jpg +Places365_test_00149975.jpg +Places365_test_00149980.jpg +Places365_test_00149991.jpg +Places365_test_00149992.jpg +Places365_test_00150003.jpg +Places365_test_00150016.jpg +Places365_test_00150022.jpg +Places365_test_00150030.jpg +Places365_test_00150033.jpg +Places365_test_00150057.jpg +Places365_test_00150060.jpg +Places365_test_00150066.jpg +Places365_test_00150101.jpg +Places365_test_00150105.jpg +Places365_test_00150116.jpg +Places365_test_00150137.jpg +Places365_test_00150147.jpg +Places365_test_00150182.jpg +Places365_test_00150207.jpg +Places365_test_00150224.jpg +Places365_test_00150240.jpg +Places365_test_00150248.jpg +Places365_test_00150270.jpg +Places365_test_00150276.jpg +Places365_test_00150293.jpg +Places365_test_00150327.jpg +Places365_test_00150333.jpg +Places365_test_00150340.jpg +Places365_test_00150355.jpg +Places365_test_00150357.jpg +Places365_test_00150362.jpg +Places365_test_00150375.jpg +Places365_test_00150384.jpg +Places365_test_00150390.jpg +Places365_test_00150396.jpg +Places365_test_00150405.jpg +Places365_test_00150420.jpg +Places365_test_00150442.jpg +Places365_test_00150443.jpg +Places365_test_00150446.jpg +Places365_test_00150449.jpg +Places365_test_00150474.jpg +Places365_test_00150475.jpg +Places365_test_00150481.jpg +Places365_test_00150491.jpg +Places365_test_00150495.jpg +Places365_test_00150501.jpg +Places365_test_00150504.jpg +Places365_test_00150523.jpg +Places365_test_00150538.jpg +Places365_test_00150547.jpg +Places365_test_00150553.jpg +Places365_test_00150560.jpg +Places365_test_00150576.jpg +Places365_test_00150605.jpg +Places365_test_00150609.jpg +Places365_test_00150653.jpg +Places365_test_00150677.jpg +Places365_test_00150696.jpg +Places365_test_00150701.jpg +Places365_test_00150702.jpg +Places365_test_00150733.jpg +Places365_test_00150735.jpg +Places365_test_00150743.jpg +Places365_test_00150745.jpg +Places365_test_00150753.jpg +Places365_test_00150761.jpg +Places365_test_00150763.jpg +Places365_test_00150769.jpg +Places365_test_00150772.jpg +Places365_test_00150779.jpg +Places365_test_00150794.jpg +Places365_test_00150818.jpg +Places365_test_00150822.jpg +Places365_test_00150826.jpg +Places365_test_00150829.jpg +Places365_test_00150845.jpg +Places365_test_00150870.jpg +Places365_test_00150881.jpg +Places365_test_00150893.jpg +Places365_test_00150903.jpg +Places365_test_00150926.jpg +Places365_test_00150927.jpg +Places365_test_00150942.jpg +Places365_test_00150947.jpg +Places365_test_00150949.jpg +Places365_test_00150954.jpg +Places365_test_00150992.jpg +Places365_test_00150996.jpg +Places365_test_00151018.jpg +Places365_test_00151026.jpg +Places365_test_00151051.jpg +Places365_test_00151108.jpg +Places365_test_00151128.jpg +Places365_test_00151130.jpg +Places365_test_00151131.jpg +Places365_test_00151136.jpg +Places365_test_00151171.jpg +Places365_test_00151179.jpg +Places365_test_00151186.jpg +Places365_test_00151191.jpg +Places365_test_00151196.jpg +Places365_test_00151214.jpg +Places365_test_00151222.jpg +Places365_test_00151227.jpg +Places365_test_00151236.jpg +Places365_test_00151238.jpg 
+Places365_test_00151242.jpg +Places365_test_00151248.jpg +Places365_test_00151249.jpg +Places365_test_00151257.jpg +Places365_test_00151265.jpg +Places365_test_00151272.jpg +Places365_test_00151274.jpg +Places365_test_00151275.jpg +Places365_test_00151282.jpg +Places365_test_00151322.jpg +Places365_test_00151329.jpg +Places365_test_00151342.jpg +Places365_test_00151344.jpg +Places365_test_00151352.jpg +Places365_test_00151361.jpg +Places365_test_00151364.jpg +Places365_test_00151368.jpg +Places365_test_00151384.jpg +Places365_test_00151391.jpg +Places365_test_00151428.jpg +Places365_test_00151436.jpg +Places365_test_00151438.jpg +Places365_test_00151445.jpg +Places365_test_00151448.jpg +Places365_test_00151461.jpg +Places365_test_00151469.jpg +Places365_test_00151499.jpg +Places365_test_00151503.jpg +Places365_test_00151506.jpg +Places365_test_00151515.jpg +Places365_test_00151525.jpg +Places365_test_00151545.jpg +Places365_test_00151557.jpg +Places365_test_00151563.jpg +Places365_test_00151571.jpg +Places365_test_00151572.jpg +Places365_test_00151573.jpg +Places365_test_00151581.jpg +Places365_test_00151593.jpg +Places365_test_00151608.jpg +Places365_test_00151618.jpg +Places365_test_00151624.jpg +Places365_test_00151628.jpg +Places365_test_00151641.jpg +Places365_test_00151645.jpg +Places365_test_00151661.jpg +Places365_test_00151669.jpg +Places365_test_00151672.jpg +Places365_test_00151700.jpg +Places365_test_00151701.jpg +Places365_test_00151704.jpg +Places365_test_00151705.jpg +Places365_test_00151721.jpg +Places365_test_00151733.jpg +Places365_test_00151737.jpg +Places365_test_00151746.jpg +Places365_test_00151763.jpg +Places365_test_00151772.jpg +Places365_test_00151786.jpg +Places365_test_00151788.jpg +Places365_test_00151818.jpg +Places365_test_00151868.jpg +Places365_test_00151872.jpg +Places365_test_00151892.jpg +Places365_test_00151900.jpg +Places365_test_00151902.jpg +Places365_test_00151916.jpg +Places365_test_00151922.jpg +Places365_test_00151934.jpg +Places365_test_00151937.jpg +Places365_test_00151952.jpg +Places365_test_00151964.jpg +Places365_test_00151966.jpg +Places365_test_00151975.jpg +Places365_test_00151977.jpg +Places365_test_00151986.jpg +Places365_test_00151987.jpg +Places365_test_00152011.jpg +Places365_test_00152037.jpg +Places365_test_00152043.jpg +Places365_test_00152059.jpg +Places365_test_00152067.jpg +Places365_test_00152070.jpg +Places365_test_00152075.jpg +Places365_test_00152083.jpg +Places365_test_00152094.jpg +Places365_test_00152107.jpg +Places365_test_00152119.jpg +Places365_test_00152122.jpg +Places365_test_00152148.jpg +Places365_test_00152151.jpg +Places365_test_00152203.jpg +Places365_test_00152223.jpg +Places365_test_00152225.jpg +Places365_test_00152240.jpg +Places365_test_00152243.jpg +Places365_test_00152244.jpg +Places365_test_00152262.jpg +Places365_test_00152271.jpg +Places365_test_00152273.jpg +Places365_test_00152285.jpg +Places365_test_00152286.jpg +Places365_test_00152291.jpg +Places365_test_00152292.jpg +Places365_test_00152302.jpg +Places365_test_00152308.jpg +Places365_test_00152313.jpg +Places365_test_00152314.jpg +Places365_test_00152317.jpg +Places365_test_00152323.jpg +Places365_test_00152349.jpg +Places365_test_00152352.jpg +Places365_test_00152377.jpg +Places365_test_00152382.jpg +Places365_test_00152398.jpg +Places365_test_00152405.jpg +Places365_test_00152422.jpg +Places365_test_00152448.jpg +Places365_test_00152450.jpg +Places365_test_00152455.jpg +Places365_test_00152466.jpg +Places365_test_00152468.jpg 
+Places365_test_00152469.jpg +Places365_test_00152472.jpg +Places365_test_00152513.jpg +Places365_test_00152524.jpg +Places365_test_00152527.jpg +Places365_test_00152546.jpg +Places365_test_00152550.jpg +Places365_test_00152552.jpg +Places365_test_00152578.jpg +Places365_test_00152581.jpg +Places365_test_00152592.jpg +Places365_test_00152600.jpg +Places365_test_00152617.jpg +Places365_test_00152641.jpg +Places365_test_00152661.jpg +Places365_test_00152677.jpg +Places365_test_00152678.jpg +Places365_test_00152679.jpg +Places365_test_00152690.jpg +Places365_test_00152698.jpg +Places365_test_00152710.jpg +Places365_test_00152721.jpg +Places365_test_00152729.jpg +Places365_test_00152731.jpg +Places365_test_00152735.jpg +Places365_test_00152741.jpg +Places365_test_00152748.jpg +Places365_test_00152791.jpg +Places365_test_00152792.jpg +Places365_test_00152807.jpg +Places365_test_00152846.jpg +Places365_test_00152850.jpg +Places365_test_00152858.jpg +Places365_test_00152859.jpg +Places365_test_00152873.jpg +Places365_test_00152895.jpg +Places365_test_00152920.jpg +Places365_test_00152928.jpg +Places365_test_00152937.jpg +Places365_test_00152945.jpg +Places365_test_00152951.jpg +Places365_test_00152966.jpg +Places365_test_00152969.jpg +Places365_test_00152976.jpg +Places365_test_00152990.jpg +Places365_test_00153007.jpg +Places365_test_00153024.jpg +Places365_test_00153032.jpg +Places365_test_00153059.jpg +Places365_test_00153066.jpg +Places365_test_00153068.jpg +Places365_test_00153069.jpg +Places365_test_00153083.jpg +Places365_test_00153092.jpg +Places365_test_00153094.jpg +Places365_test_00153099.jpg +Places365_test_00153102.jpg +Places365_test_00153103.jpg +Places365_test_00153106.jpg +Places365_test_00153124.jpg +Places365_test_00153125.jpg +Places365_test_00153134.jpg +Places365_test_00153143.jpg +Places365_test_00153149.jpg +Places365_test_00153152.jpg +Places365_test_00153158.jpg +Places365_test_00153192.jpg +Places365_test_00153196.jpg +Places365_test_00153204.jpg +Places365_test_00153210.jpg +Places365_test_00153211.jpg +Places365_test_00153225.jpg +Places365_test_00153232.jpg +Places365_test_00153241.jpg +Places365_test_00153243.jpg +Places365_test_00153244.jpg +Places365_test_00153249.jpg +Places365_test_00153266.jpg +Places365_test_00153272.jpg +Places365_test_00153296.jpg +Places365_test_00153302.jpg +Places365_test_00153324.jpg +Places365_test_00153347.jpg +Places365_test_00153362.jpg +Places365_test_00153365.jpg +Places365_test_00153368.jpg +Places365_test_00153375.jpg +Places365_test_00153379.jpg +Places365_test_00153388.jpg +Places365_test_00153395.jpg +Places365_test_00153418.jpg +Places365_test_00153433.jpg +Places365_test_00153437.jpg +Places365_test_00153438.jpg +Places365_test_00153469.jpg +Places365_test_00153480.jpg +Places365_test_00153485.jpg +Places365_test_00153493.jpg +Places365_test_00153500.jpg +Places365_test_00153502.jpg +Places365_test_00153503.jpg +Places365_test_00153508.jpg +Places365_test_00153511.jpg +Places365_test_00153520.jpg +Places365_test_00153539.jpg +Places365_test_00153549.jpg +Places365_test_00153560.jpg +Places365_test_00153572.jpg +Places365_test_00153573.jpg +Places365_test_00153583.jpg +Places365_test_00153587.jpg +Places365_test_00153589.jpg +Places365_test_00153598.jpg +Places365_test_00153610.jpg +Places365_test_00153616.jpg +Places365_test_00153621.jpg +Places365_test_00153624.jpg +Places365_test_00153645.jpg +Places365_test_00153646.jpg +Places365_test_00153677.jpg +Places365_test_00153684.jpg +Places365_test_00153686.jpg 
+Places365_test_00153689.jpg +Places365_test_00153697.jpg +Places365_test_00153733.jpg +Places365_test_00153738.jpg +Places365_test_00153739.jpg +Places365_test_00153740.jpg +Places365_test_00153758.jpg +Places365_test_00153759.jpg +Places365_test_00153768.jpg +Places365_test_00153770.jpg +Places365_test_00153774.jpg +Places365_test_00153779.jpg +Places365_test_00153788.jpg +Places365_test_00153793.jpg +Places365_test_00153795.jpg +Places365_test_00153803.jpg +Places365_test_00153804.jpg +Places365_test_00153813.jpg +Places365_test_00153820.jpg +Places365_test_00153834.jpg +Places365_test_00153844.jpg +Places365_test_00153863.jpg +Places365_test_00153871.jpg +Places365_test_00153873.jpg +Places365_test_00153878.jpg +Places365_test_00153899.jpg +Places365_test_00153900.jpg +Places365_test_00153903.jpg +Places365_test_00153933.jpg +Places365_test_00153934.jpg +Places365_test_00153943.jpg +Places365_test_00153947.jpg +Places365_test_00153957.jpg +Places365_test_00153971.jpg +Places365_test_00153980.jpg +Places365_test_00153981.jpg +Places365_test_00153985.jpg +Places365_test_00153991.jpg +Places365_test_00154009.jpg +Places365_test_00154021.jpg +Places365_test_00154023.jpg +Places365_test_00154024.jpg +Places365_test_00154025.jpg +Places365_test_00154027.jpg +Places365_test_00154042.jpg +Places365_test_00154046.jpg +Places365_test_00154061.jpg +Places365_test_00154063.jpg +Places365_test_00154069.jpg +Places365_test_00154076.jpg +Places365_test_00154079.jpg +Places365_test_00154098.jpg +Places365_test_00154106.jpg +Places365_test_00154115.jpg +Places365_test_00154127.jpg +Places365_test_00154153.jpg +Places365_test_00154157.jpg +Places365_test_00154163.jpg +Places365_test_00154172.jpg +Places365_test_00154238.jpg +Places365_test_00154239.jpg +Places365_test_00154241.jpg +Places365_test_00154245.jpg +Places365_test_00154247.jpg +Places365_test_00154249.jpg +Places365_test_00154270.jpg +Places365_test_00154276.jpg +Places365_test_00154278.jpg +Places365_test_00154283.jpg +Places365_test_00154288.jpg +Places365_test_00154290.jpg +Places365_test_00154297.jpg +Places365_test_00154302.jpg +Places365_test_00154307.jpg +Places365_test_00154313.jpg +Places365_test_00154343.jpg +Places365_test_00154345.jpg +Places365_test_00154346.jpg +Places365_test_00154355.jpg +Places365_test_00154356.jpg +Places365_test_00154379.jpg +Places365_test_00154456.jpg +Places365_test_00154470.jpg +Places365_test_00154488.jpg +Places365_test_00154489.jpg +Places365_test_00154491.jpg +Places365_test_00154502.jpg +Places365_test_00154530.jpg +Places365_test_00154533.jpg +Places365_test_00154564.jpg +Places365_test_00154572.jpg +Places365_test_00154600.jpg +Places365_test_00154606.jpg +Places365_test_00154618.jpg +Places365_test_00154631.jpg +Places365_test_00154642.jpg +Places365_test_00154705.jpg +Places365_test_00154711.jpg +Places365_test_00154726.jpg +Places365_test_00154736.jpg +Places365_test_00154751.jpg +Places365_test_00154782.jpg +Places365_test_00154822.jpg +Places365_test_00154823.jpg +Places365_test_00154878.jpg +Places365_test_00154897.jpg +Places365_test_00154900.jpg +Places365_test_00154906.jpg +Places365_test_00154908.jpg +Places365_test_00154913.jpg +Places365_test_00154918.jpg +Places365_test_00154925.jpg +Places365_test_00154929.jpg +Places365_test_00154941.jpg +Places365_test_00154954.jpg +Places365_test_00154961.jpg +Places365_test_00154994.jpg +Places365_test_00154999.jpg +Places365_test_00155001.jpg +Places365_test_00155003.jpg +Places365_test_00155004.jpg +Places365_test_00155005.jpg 
+Places365_test_00155043.jpg +Places365_test_00155049.jpg +Places365_test_00155051.jpg +Places365_test_00155060.jpg +Places365_test_00155063.jpg +Places365_test_00155076.jpg +Places365_test_00155101.jpg +Places365_test_00155109.jpg +Places365_test_00155135.jpg +Places365_test_00155141.jpg +Places365_test_00155152.jpg +Places365_test_00155153.jpg +Places365_test_00155183.jpg +Places365_test_00155212.jpg +Places365_test_00155226.jpg +Places365_test_00155236.jpg +Places365_test_00155244.jpg +Places365_test_00155248.jpg +Places365_test_00155260.jpg +Places365_test_00155278.jpg +Places365_test_00155285.jpg +Places365_test_00155297.jpg +Places365_test_00155323.jpg +Places365_test_00155324.jpg +Places365_test_00155327.jpg +Places365_test_00155329.jpg +Places365_test_00155338.jpg +Places365_test_00155340.jpg +Places365_test_00155350.jpg +Places365_test_00155389.jpg +Places365_test_00155411.jpg +Places365_test_00155430.jpg +Places365_test_00155431.jpg +Places365_test_00155434.jpg +Places365_test_00155435.jpg +Places365_test_00155447.jpg +Places365_test_00155458.jpg +Places365_test_00155464.jpg +Places365_test_00155468.jpg +Places365_test_00155469.jpg +Places365_test_00155517.jpg +Places365_test_00155530.jpg +Places365_test_00155534.jpg +Places365_test_00155569.jpg +Places365_test_00155576.jpg +Places365_test_00155589.jpg +Places365_test_00155632.jpg +Places365_test_00155643.jpg +Places365_test_00155658.jpg +Places365_test_00155684.jpg +Places365_test_00155704.jpg +Places365_test_00155720.jpg +Places365_test_00155727.jpg +Places365_test_00155733.jpg +Places365_test_00155747.jpg +Places365_test_00155772.jpg +Places365_test_00155799.jpg +Places365_test_00155803.jpg +Places365_test_00155805.jpg +Places365_test_00155809.jpg +Places365_test_00155812.jpg +Places365_test_00155815.jpg +Places365_test_00155817.jpg +Places365_test_00155823.jpg +Places365_test_00155831.jpg +Places365_test_00155833.jpg +Places365_test_00155839.jpg +Places365_test_00155853.jpg +Places365_test_00155856.jpg +Places365_test_00155864.jpg +Places365_test_00155875.jpg +Places365_test_00155888.jpg +Places365_test_00155903.jpg +Places365_test_00155906.jpg +Places365_test_00155909.jpg +Places365_test_00155921.jpg +Places365_test_00155958.jpg +Places365_test_00155965.jpg +Places365_test_00155996.jpg +Places365_test_00156029.jpg +Places365_test_00156032.jpg +Places365_test_00156039.jpg +Places365_test_00156051.jpg +Places365_test_00156062.jpg +Places365_test_00156064.jpg +Places365_test_00156083.jpg +Places365_test_00156089.jpg +Places365_test_00156094.jpg +Places365_test_00156117.jpg +Places365_test_00156118.jpg +Places365_test_00156120.jpg +Places365_test_00156121.jpg +Places365_test_00156127.jpg +Places365_test_00156133.jpg +Places365_test_00156134.jpg +Places365_test_00156140.jpg +Places365_test_00156145.jpg +Places365_test_00156146.jpg +Places365_test_00156177.jpg +Places365_test_00156181.jpg +Places365_test_00156204.jpg +Places365_test_00156215.jpg +Places365_test_00156217.jpg +Places365_test_00156234.jpg +Places365_test_00156250.jpg +Places365_test_00156262.jpg +Places365_test_00156284.jpg +Places365_test_00156296.jpg +Places365_test_00156327.jpg +Places365_test_00156355.jpg +Places365_test_00156357.jpg +Places365_test_00156359.jpg +Places365_test_00156366.jpg +Places365_test_00156383.jpg +Places365_test_00156384.jpg +Places365_test_00156385.jpg +Places365_test_00156399.jpg +Places365_test_00156400.jpg +Places365_test_00156416.jpg +Places365_test_00156427.jpg +Places365_test_00156430.jpg +Places365_test_00156440.jpg 
+Places365_test_00156445.jpg +Places365_test_00156455.jpg +Places365_test_00156466.jpg +Places365_test_00156478.jpg +Places365_test_00156495.jpg +Places365_test_00156512.jpg +Places365_test_00156524.jpg +Places365_test_00156529.jpg +Places365_test_00156534.jpg +Places365_test_00156554.jpg +Places365_test_00156581.jpg +Places365_test_00156615.jpg +Places365_test_00156620.jpg +Places365_test_00156623.jpg +Places365_test_00156651.jpg +Places365_test_00156660.jpg +Places365_test_00156666.jpg +Places365_test_00156695.jpg +Places365_test_00156698.jpg +Places365_test_00156713.jpg +Places365_test_00156717.jpg +Places365_test_00156721.jpg +Places365_test_00156727.jpg +Places365_test_00156730.jpg +Places365_test_00156737.jpg +Places365_test_00156750.jpg +Places365_test_00156752.jpg +Places365_test_00156767.jpg +Places365_test_00156773.jpg +Places365_test_00156789.jpg +Places365_test_00156805.jpg +Places365_test_00156810.jpg +Places365_test_00156821.jpg +Places365_test_00156830.jpg +Places365_test_00156845.jpg +Places365_test_00156853.jpg +Places365_test_00156886.jpg +Places365_test_00156890.jpg +Places365_test_00156893.jpg +Places365_test_00156895.jpg +Places365_test_00156919.jpg +Places365_test_00156948.jpg +Places365_test_00156968.jpg +Places365_test_00156985.jpg +Places365_test_00156989.jpg +Places365_test_00156993.jpg +Places365_test_00157005.jpg +Places365_test_00157015.jpg +Places365_test_00157028.jpg +Places365_test_00157044.jpg +Places365_test_00157045.jpg +Places365_test_00157057.jpg +Places365_test_00157063.jpg +Places365_test_00157064.jpg +Places365_test_00157067.jpg +Places365_test_00157081.jpg +Places365_test_00157087.jpg +Places365_test_00157136.jpg +Places365_test_00157142.jpg +Places365_test_00157146.jpg +Places365_test_00157162.jpg +Places365_test_00157170.jpg +Places365_test_00157176.jpg +Places365_test_00157187.jpg +Places365_test_00157204.jpg +Places365_test_00157210.jpg +Places365_test_00157219.jpg +Places365_test_00157239.jpg +Places365_test_00157244.jpg +Places365_test_00157248.jpg +Places365_test_00157250.jpg +Places365_test_00157256.jpg +Places365_test_00157273.jpg +Places365_test_00157284.jpg +Places365_test_00157289.jpg +Places365_test_00157312.jpg +Places365_test_00157333.jpg +Places365_test_00157336.jpg +Places365_test_00157341.jpg +Places365_test_00157348.jpg +Places365_test_00157368.jpg +Places365_test_00157372.jpg +Places365_test_00157388.jpg +Places365_test_00157396.jpg +Places365_test_00157411.jpg +Places365_test_00157423.jpg +Places365_test_00157424.jpg +Places365_test_00157428.jpg +Places365_test_00157468.jpg +Places365_test_00157470.jpg +Places365_test_00157479.jpg +Places365_test_00157481.jpg +Places365_test_00157487.jpg +Places365_test_00157493.jpg +Places365_test_00157506.jpg +Places365_test_00157510.jpg +Places365_test_00157515.jpg +Places365_test_00157525.jpg +Places365_test_00157544.jpg +Places365_test_00157552.jpg +Places365_test_00157572.jpg +Places365_test_00157578.jpg +Places365_test_00157586.jpg +Places365_test_00157592.jpg +Places365_test_00157595.jpg +Places365_test_00157600.jpg +Places365_test_00157602.jpg +Places365_test_00157613.jpg +Places365_test_00157615.jpg +Places365_test_00157620.jpg +Places365_test_00157627.jpg +Places365_test_00157629.jpg +Places365_test_00157631.jpg +Places365_test_00157635.jpg +Places365_test_00157638.jpg +Places365_test_00157646.jpg +Places365_test_00157650.jpg +Places365_test_00157651.jpg +Places365_test_00157658.jpg +Places365_test_00157661.jpg +Places365_test_00157671.jpg +Places365_test_00157689.jpg 
+Places365_test_00157698.jpg +Places365_test_00157700.jpg +Places365_test_00157704.jpg +Places365_test_00157712.jpg +Places365_test_00157714.jpg +Places365_test_00157718.jpg +Places365_test_00157726.jpg +Places365_test_00157728.jpg +Places365_test_00157743.jpg +Places365_test_00157747.jpg +Places365_test_00157752.jpg +Places365_test_00157760.jpg +Places365_test_00157771.jpg +Places365_test_00157793.jpg +Places365_test_00157811.jpg +Places365_test_00157813.jpg +Places365_test_00157876.jpg +Places365_test_00157880.jpg +Places365_test_00157892.jpg +Places365_test_00157900.jpg +Places365_test_00157928.jpg +Places365_test_00157943.jpg +Places365_test_00157951.jpg +Places365_test_00157974.jpg +Places365_test_00158000.jpg +Places365_test_00158027.jpg +Places365_test_00158028.jpg +Places365_test_00158029.jpg +Places365_test_00158031.jpg +Places365_test_00158046.jpg +Places365_test_00158052.jpg +Places365_test_00158056.jpg +Places365_test_00158088.jpg +Places365_test_00158091.jpg +Places365_test_00158099.jpg +Places365_test_00158113.jpg +Places365_test_00158135.jpg +Places365_test_00158159.jpg +Places365_test_00158161.jpg +Places365_test_00158162.jpg +Places365_test_00158171.jpg +Places365_test_00158182.jpg +Places365_test_00158183.jpg +Places365_test_00158191.jpg +Places365_test_00158196.jpg +Places365_test_00158205.jpg +Places365_test_00158206.jpg +Places365_test_00158213.jpg +Places365_test_00158229.jpg +Places365_test_00158241.jpg +Places365_test_00158246.jpg +Places365_test_00158253.jpg +Places365_test_00158272.jpg +Places365_test_00158278.jpg +Places365_test_00158284.jpg +Places365_test_00158288.jpg +Places365_test_00158292.jpg +Places365_test_00158296.jpg +Places365_test_00158303.jpg +Places365_test_00158312.jpg +Places365_test_00158317.jpg +Places365_test_00158325.jpg +Places365_test_00158333.jpg +Places365_test_00158351.jpg +Places365_test_00158378.jpg +Places365_test_00158390.jpg +Places365_test_00158397.jpg +Places365_test_00158400.jpg +Places365_test_00158422.jpg +Places365_test_00158428.jpg +Places365_test_00158435.jpg +Places365_test_00158442.jpg +Places365_test_00158461.jpg +Places365_test_00158466.jpg +Places365_test_00158468.jpg +Places365_test_00158472.jpg +Places365_test_00158483.jpg +Places365_test_00158486.jpg +Places365_test_00158525.jpg +Places365_test_00158540.jpg +Places365_test_00158581.jpg +Places365_test_00158586.jpg +Places365_test_00158591.jpg +Places365_test_00158600.jpg +Places365_test_00158603.jpg +Places365_test_00158638.jpg +Places365_test_00158646.jpg +Places365_test_00158648.jpg +Places365_test_00158653.jpg +Places365_test_00158659.jpg +Places365_test_00158662.jpg +Places365_test_00158666.jpg +Places365_test_00158672.jpg +Places365_test_00158678.jpg +Places365_test_00158688.jpg +Places365_test_00158696.jpg +Places365_test_00158706.jpg +Places365_test_00158729.jpg +Places365_test_00158741.jpg +Places365_test_00158753.jpg +Places365_test_00158756.jpg +Places365_test_00158757.jpg +Places365_test_00158760.jpg +Places365_test_00158768.jpg +Places365_test_00158786.jpg +Places365_test_00158788.jpg +Places365_test_00158790.jpg +Places365_test_00158798.jpg +Places365_test_00158829.jpg +Places365_test_00158832.jpg +Places365_test_00158850.jpg +Places365_test_00158852.jpg +Places365_test_00158856.jpg +Places365_test_00158869.jpg +Places365_test_00158880.jpg +Places365_test_00158901.jpg +Places365_test_00158904.jpg +Places365_test_00158943.jpg +Places365_test_00158965.jpg +Places365_test_00158982.jpg +Places365_test_00158986.jpg +Places365_test_00158989.jpg 
+Places365_test_00158990.jpg +Places365_test_00159008.jpg +Places365_test_00159019.jpg +Places365_test_00159044.jpg +Places365_test_00159054.jpg +Places365_test_00159070.jpg +Places365_test_00159071.jpg +Places365_test_00159074.jpg +Places365_test_00159096.jpg +Places365_test_00159098.jpg +Places365_test_00159129.jpg +Places365_test_00159131.jpg +Places365_test_00159133.jpg +Places365_test_00159141.jpg +Places365_test_00159143.jpg +Places365_test_00159147.jpg +Places365_test_00159163.jpg +Places365_test_00159169.jpg +Places365_test_00159200.jpg +Places365_test_00159209.jpg +Places365_test_00159222.jpg +Places365_test_00159240.jpg +Places365_test_00159241.jpg +Places365_test_00159256.jpg +Places365_test_00159260.jpg +Places365_test_00159262.jpg +Places365_test_00159272.jpg +Places365_test_00159285.jpg +Places365_test_00159286.jpg +Places365_test_00159314.jpg +Places365_test_00159333.jpg +Places365_test_00159339.jpg +Places365_test_00159342.jpg +Places365_test_00159378.jpg +Places365_test_00159380.jpg +Places365_test_00159387.jpg +Places365_test_00159401.jpg +Places365_test_00159405.jpg +Places365_test_00159411.jpg +Places365_test_00159429.jpg +Places365_test_00159461.jpg +Places365_test_00159466.jpg +Places365_test_00159483.jpg +Places365_test_00159509.jpg +Places365_test_00159516.jpg +Places365_test_00159527.jpg +Places365_test_00159528.jpg +Places365_test_00159530.jpg +Places365_test_00159550.jpg +Places365_test_00159566.jpg +Places365_test_00159595.jpg +Places365_test_00159599.jpg +Places365_test_00159610.jpg +Places365_test_00159641.jpg +Places365_test_00159646.jpg +Places365_test_00159653.jpg +Places365_test_00159655.jpg +Places365_test_00159666.jpg +Places365_test_00159671.jpg +Places365_test_00159677.jpg +Places365_test_00159696.jpg +Places365_test_00159703.jpg +Places365_test_00159719.jpg +Places365_test_00159723.jpg +Places365_test_00159759.jpg +Places365_test_00159770.jpg +Places365_test_00159778.jpg +Places365_test_00159793.jpg +Places365_test_00159817.jpg +Places365_test_00159829.jpg +Places365_test_00159830.jpg +Places365_test_00159833.jpg +Places365_test_00159838.jpg +Places365_test_00159842.jpg +Places365_test_00159870.jpg +Places365_test_00159872.jpg +Places365_test_00159881.jpg +Places365_test_00159888.jpg +Places365_test_00159892.jpg +Places365_test_00159908.jpg +Places365_test_00159910.jpg +Places365_test_00159913.jpg +Places365_test_00159947.jpg +Places365_test_00159956.jpg +Places365_test_00159957.jpg +Places365_test_00159998.jpg +Places365_test_00160014.jpg +Places365_test_00160035.jpg +Places365_test_00160048.jpg +Places365_test_00160050.jpg +Places365_test_00160051.jpg +Places365_test_00160054.jpg +Places365_test_00160056.jpg +Places365_test_00160064.jpg +Places365_test_00160080.jpg +Places365_test_00160117.jpg +Places365_test_00160121.jpg +Places365_test_00160129.jpg +Places365_test_00160132.jpg +Places365_test_00160133.jpg +Places365_test_00160149.jpg +Places365_test_00160180.jpg +Places365_test_00160184.jpg +Places365_test_00160193.jpg +Places365_test_00160202.jpg +Places365_test_00160205.jpg +Places365_test_00160216.jpg +Places365_test_00160227.jpg +Places365_test_00160231.jpg +Places365_test_00160232.jpg +Places365_test_00160242.jpg +Places365_test_00160243.jpg +Places365_test_00160248.jpg +Places365_test_00160258.jpg +Places365_test_00160264.jpg +Places365_test_00160266.jpg +Places365_test_00160273.jpg +Places365_test_00160279.jpg +Places365_test_00160288.jpg +Places365_test_00160289.jpg +Places365_test_00160292.jpg +Places365_test_00160317.jpg 
+Places365_test_00160338.jpg +Places365_test_00160371.jpg +Places365_test_00160373.jpg +Places365_test_00160378.jpg +Places365_test_00160380.jpg +Places365_test_00160392.jpg +Places365_test_00160399.jpg +Places365_test_00160406.jpg +Places365_test_00160447.jpg +Places365_test_00160469.jpg +Places365_test_00160481.jpg +Places365_test_00160491.jpg +Places365_test_00160500.jpg +Places365_test_00160513.jpg +Places365_test_00160514.jpg +Places365_test_00160528.jpg +Places365_test_00160531.jpg +Places365_test_00160535.jpg +Places365_test_00160545.jpg +Places365_test_00160551.jpg +Places365_test_00160553.jpg +Places365_test_00160574.jpg +Places365_test_00160583.jpg +Places365_test_00160584.jpg +Places365_test_00160586.jpg +Places365_test_00160618.jpg +Places365_test_00160674.jpg +Places365_test_00160676.jpg +Places365_test_00160687.jpg +Places365_test_00160703.jpg +Places365_test_00160709.jpg +Places365_test_00160745.jpg +Places365_test_00160748.jpg +Places365_test_00160752.jpg +Places365_test_00160753.jpg +Places365_test_00160760.jpg +Places365_test_00160762.jpg +Places365_test_00160764.jpg +Places365_test_00160768.jpg +Places365_test_00160772.jpg +Places365_test_00160773.jpg +Places365_test_00160777.jpg +Places365_test_00160788.jpg +Places365_test_00160793.jpg +Places365_test_00160794.jpg +Places365_test_00160815.jpg +Places365_test_00160816.jpg +Places365_test_00160819.jpg +Places365_test_00160823.jpg +Places365_test_00160827.jpg +Places365_test_00160833.jpg +Places365_test_00160837.jpg +Places365_test_00160839.jpg +Places365_test_00160843.jpg +Places365_test_00160858.jpg +Places365_test_00160859.jpg +Places365_test_00160861.jpg +Places365_test_00160862.jpg +Places365_test_00160869.jpg +Places365_test_00160878.jpg +Places365_test_00160883.jpg +Places365_test_00160886.jpg +Places365_test_00160892.jpg +Places365_test_00160899.jpg +Places365_test_00160901.jpg +Places365_test_00160908.jpg +Places365_test_00160919.jpg +Places365_test_00160933.jpg +Places365_test_00160943.jpg +Places365_test_00160952.jpg +Places365_test_00160969.jpg +Places365_test_00160970.jpg +Places365_test_00161005.jpg +Places365_test_00161027.jpg +Places365_test_00161035.jpg +Places365_test_00161041.jpg +Places365_test_00161047.jpg +Places365_test_00161053.jpg +Places365_test_00161060.jpg +Places365_test_00161066.jpg +Places365_test_00161106.jpg +Places365_test_00161114.jpg +Places365_test_00161120.jpg +Places365_test_00161131.jpg +Places365_test_00161147.jpg +Places365_test_00161171.jpg +Places365_test_00161172.jpg +Places365_test_00161238.jpg +Places365_test_00161252.jpg +Places365_test_00161262.jpg +Places365_test_00161272.jpg +Places365_test_00161275.jpg +Places365_test_00161281.jpg +Places365_test_00161298.jpg +Places365_test_00161300.jpg +Places365_test_00161303.jpg +Places365_test_00161304.jpg +Places365_test_00161307.jpg +Places365_test_00161313.jpg +Places365_test_00161322.jpg +Places365_test_00161337.jpg +Places365_test_00161377.jpg +Places365_test_00161390.jpg +Places365_test_00161399.jpg +Places365_test_00161411.jpg +Places365_test_00161414.jpg +Places365_test_00161423.jpg +Places365_test_00161425.jpg +Places365_test_00161433.jpg +Places365_test_00161434.jpg +Places365_test_00161462.jpg +Places365_test_00161469.jpg +Places365_test_00161505.jpg +Places365_test_00161559.jpg +Places365_test_00161575.jpg +Places365_test_00161588.jpg +Places365_test_00161608.jpg +Places365_test_00161628.jpg +Places365_test_00161635.jpg +Places365_test_00161636.jpg +Places365_test_00161644.jpg +Places365_test_00161646.jpg 
+Places365_test_00161647.jpg +Places365_test_00161655.jpg +Places365_test_00161659.jpg +Places365_test_00161681.jpg +Places365_test_00161691.jpg +Places365_test_00161708.jpg +Places365_test_00161714.jpg +Places365_test_00161717.jpg +Places365_test_00161723.jpg +Places365_test_00161739.jpg +Places365_test_00161761.jpg +Places365_test_00161762.jpg +Places365_test_00161764.jpg +Places365_test_00161809.jpg +Places365_test_00161821.jpg +Places365_test_00161832.jpg +Places365_test_00161837.jpg +Places365_test_00161843.jpg +Places365_test_00161851.jpg +Places365_test_00161855.jpg +Places365_test_00161863.jpg +Places365_test_00161866.jpg +Places365_test_00161921.jpg +Places365_test_00161922.jpg +Places365_test_00161936.jpg +Places365_test_00161946.jpg +Places365_test_00161956.jpg +Places365_test_00161958.jpg +Places365_test_00162014.jpg +Places365_test_00162015.jpg +Places365_test_00162022.jpg +Places365_test_00162024.jpg +Places365_test_00162031.jpg +Places365_test_00162043.jpg +Places365_test_00162068.jpg +Places365_test_00162080.jpg +Places365_test_00162099.jpg +Places365_test_00162108.jpg +Places365_test_00162127.jpg +Places365_test_00162141.jpg +Places365_test_00162164.jpg +Places365_test_00162166.jpg +Places365_test_00162177.jpg +Places365_test_00162179.jpg +Places365_test_00162195.jpg +Places365_test_00162210.jpg +Places365_test_00162215.jpg +Places365_test_00162216.jpg +Places365_test_00162225.jpg +Places365_test_00162271.jpg +Places365_test_00162281.jpg +Places365_test_00162310.jpg +Places365_test_00162313.jpg +Places365_test_00162334.jpg +Places365_test_00162358.jpg +Places365_test_00162364.jpg +Places365_test_00162369.jpg +Places365_test_00162371.jpg +Places365_test_00162374.jpg +Places365_test_00162380.jpg +Places365_test_00162405.jpg +Places365_test_00162413.jpg +Places365_test_00162420.jpg +Places365_test_00162423.jpg +Places365_test_00162429.jpg +Places365_test_00162437.jpg +Places365_test_00162473.jpg +Places365_test_00162474.jpg +Places365_test_00162491.jpg +Places365_test_00162506.jpg +Places365_test_00162512.jpg +Places365_test_00162516.jpg +Places365_test_00162522.jpg +Places365_test_00162534.jpg +Places365_test_00162536.jpg +Places365_test_00162537.jpg +Places365_test_00162552.jpg +Places365_test_00162566.jpg +Places365_test_00162569.jpg +Places365_test_00162572.jpg +Places365_test_00162581.jpg +Places365_test_00162608.jpg +Places365_test_00162618.jpg +Places365_test_00162621.jpg +Places365_test_00162652.jpg +Places365_test_00162654.jpg +Places365_test_00162684.jpg +Places365_test_00162689.jpg +Places365_test_00162698.jpg +Places365_test_00162704.jpg +Places365_test_00162715.jpg +Places365_test_00162725.jpg +Places365_test_00162730.jpg +Places365_test_00162735.jpg +Places365_test_00162756.jpg +Places365_test_00162764.jpg +Places365_test_00162769.jpg +Places365_test_00162787.jpg +Places365_test_00162792.jpg +Places365_test_00162843.jpg +Places365_test_00162847.jpg +Places365_test_00162885.jpg +Places365_test_00162903.jpg +Places365_test_00162909.jpg +Places365_test_00162913.jpg +Places365_test_00162924.jpg +Places365_test_00162930.jpg +Places365_test_00162936.jpg +Places365_test_00162942.jpg +Places365_test_00162971.jpg +Places365_test_00162976.jpg +Places365_test_00162991.jpg +Places365_test_00163006.jpg +Places365_test_00163012.jpg +Places365_test_00163033.jpg +Places365_test_00163036.jpg +Places365_test_00163052.jpg +Places365_test_00163059.jpg +Places365_test_00163064.jpg +Places365_test_00163065.jpg +Places365_test_00163067.jpg +Places365_test_00163116.jpg 
+Places365_test_00163126.jpg +Places365_test_00163143.jpg +Places365_test_00163147.jpg +Places365_test_00163149.jpg +Places365_test_00163156.jpg +Places365_test_00163166.jpg +Places365_test_00163167.jpg +Places365_test_00163179.jpg +Places365_test_00163182.jpg +Places365_test_00163189.jpg +Places365_test_00163208.jpg +Places365_test_00163222.jpg +Places365_test_00163226.jpg +Places365_test_00163249.jpg +Places365_test_00163251.jpg +Places365_test_00163267.jpg +Places365_test_00163282.jpg +Places365_test_00163285.jpg +Places365_test_00163291.jpg +Places365_test_00163306.jpg +Places365_test_00163316.jpg +Places365_test_00163325.jpg +Places365_test_00163332.jpg +Places365_test_00163357.jpg +Places365_test_00163362.jpg +Places365_test_00163382.jpg +Places365_test_00163384.jpg +Places365_test_00163407.jpg +Places365_test_00163421.jpg +Places365_test_00163424.jpg +Places365_test_00163426.jpg +Places365_test_00163427.jpg +Places365_test_00163440.jpg +Places365_test_00163452.jpg +Places365_test_00163453.jpg +Places365_test_00163495.jpg +Places365_test_00163501.jpg +Places365_test_00163502.jpg +Places365_test_00163510.jpg +Places365_test_00163523.jpg +Places365_test_00163528.jpg +Places365_test_00163536.jpg +Places365_test_00163546.jpg +Places365_test_00163547.jpg +Places365_test_00163573.jpg +Places365_test_00163594.jpg +Places365_test_00163603.jpg +Places365_test_00163605.jpg +Places365_test_00163613.jpg +Places365_test_00163616.jpg +Places365_test_00163626.jpg +Places365_test_00163627.jpg +Places365_test_00163628.jpg +Places365_test_00163629.jpg +Places365_test_00163656.jpg +Places365_test_00163658.jpg +Places365_test_00163659.jpg +Places365_test_00163663.jpg +Places365_test_00163664.jpg +Places365_test_00163669.jpg +Places365_test_00163679.jpg +Places365_test_00163689.jpg +Places365_test_00163691.jpg +Places365_test_00163692.jpg +Places365_test_00163698.jpg +Places365_test_00163701.jpg +Places365_test_00163706.jpg +Places365_test_00163717.jpg +Places365_test_00163727.jpg +Places365_test_00163740.jpg +Places365_test_00163750.jpg +Places365_test_00163752.jpg +Places365_test_00163762.jpg +Places365_test_00163766.jpg +Places365_test_00163774.jpg +Places365_test_00163779.jpg +Places365_test_00163791.jpg +Places365_test_00163793.jpg +Places365_test_00163800.jpg +Places365_test_00163828.jpg +Places365_test_00163830.jpg +Places365_test_00163853.jpg +Places365_test_00163855.jpg +Places365_test_00163857.jpg +Places365_test_00163870.jpg +Places365_test_00163877.jpg +Places365_test_00163902.jpg +Places365_test_00163904.jpg +Places365_test_00163910.jpg +Places365_test_00163916.jpg +Places365_test_00163948.jpg +Places365_test_00163959.jpg +Places365_test_00163976.jpg +Places365_test_00163980.jpg +Places365_test_00163999.jpg +Places365_test_00164057.jpg +Places365_test_00164058.jpg +Places365_test_00164059.jpg +Places365_test_00164075.jpg +Places365_test_00164078.jpg +Places365_test_00164085.jpg +Places365_test_00164089.jpg +Places365_test_00164090.jpg +Places365_test_00164105.jpg +Places365_test_00164109.jpg +Places365_test_00164116.jpg +Places365_test_00164125.jpg +Places365_test_00164153.jpg +Places365_test_00164154.jpg +Places365_test_00164165.jpg +Places365_test_00164182.jpg +Places365_test_00164188.jpg +Places365_test_00164193.jpg +Places365_test_00164250.jpg +Places365_test_00164251.jpg +Places365_test_00164265.jpg +Places365_test_00164285.jpg +Places365_test_00164303.jpg +Places365_test_00164305.jpg +Places365_test_00164309.jpg +Places365_test_00164361.jpg +Places365_test_00164363.jpg 
+Places365_test_00164379.jpg +Places365_test_00164388.jpg +Places365_test_00164394.jpg +Places365_test_00164412.jpg +Places365_test_00164427.jpg +Places365_test_00164441.jpg +Places365_test_00164449.jpg +Places365_test_00164463.jpg +Places365_test_00164470.jpg +Places365_test_00164485.jpg +Places365_test_00164507.jpg +Places365_test_00164512.jpg +Places365_test_00164535.jpg +Places365_test_00164540.jpg +Places365_test_00164541.jpg +Places365_test_00164543.jpg +Places365_test_00164563.jpg +Places365_test_00164569.jpg +Places365_test_00164570.jpg +Places365_test_00164574.jpg +Places365_test_00164596.jpg +Places365_test_00164598.jpg +Places365_test_00164604.jpg +Places365_test_00164605.jpg +Places365_test_00164611.jpg +Places365_test_00164619.jpg +Places365_test_00164629.jpg +Places365_test_00164659.jpg +Places365_test_00164671.jpg +Places365_test_00164673.jpg +Places365_test_00164691.jpg +Places365_test_00164694.jpg +Places365_test_00164697.jpg +Places365_test_00164709.jpg +Places365_test_00164715.jpg +Places365_test_00164716.jpg +Places365_test_00164720.jpg +Places365_test_00164736.jpg +Places365_test_00164748.jpg +Places365_test_00164749.jpg +Places365_test_00164760.jpg +Places365_test_00164769.jpg +Places365_test_00164791.jpg +Places365_test_00164828.jpg +Places365_test_00164831.jpg +Places365_test_00164851.jpg +Places365_test_00164862.jpg +Places365_test_00164892.jpg +Places365_test_00164940.jpg +Places365_test_00164943.jpg +Places365_test_00164965.jpg +Places365_test_00164984.jpg +Places365_test_00164987.jpg +Places365_test_00164988.jpg +Places365_test_00164993.jpg +Places365_test_00165001.jpg +Places365_test_00165014.jpg +Places365_test_00165016.jpg +Places365_test_00165024.jpg +Places365_test_00165049.jpg +Places365_test_00165060.jpg +Places365_test_00165061.jpg +Places365_test_00165074.jpg +Places365_test_00165080.jpg +Places365_test_00165085.jpg +Places365_test_00165091.jpg +Places365_test_00165093.jpg +Places365_test_00165104.jpg +Places365_test_00165113.jpg +Places365_test_00165119.jpg +Places365_test_00165123.jpg +Places365_test_00165131.jpg +Places365_test_00165147.jpg +Places365_test_00165158.jpg +Places365_test_00165159.jpg +Places365_test_00165190.jpg +Places365_test_00165197.jpg +Places365_test_00165201.jpg +Places365_test_00165209.jpg +Places365_test_00165212.jpg +Places365_test_00165213.jpg +Places365_test_00165218.jpg +Places365_test_00165220.jpg +Places365_test_00165257.jpg +Places365_test_00165268.jpg +Places365_test_00165277.jpg +Places365_test_00165282.jpg +Places365_test_00165286.jpg +Places365_test_00165288.jpg +Places365_test_00165298.jpg +Places365_test_00165300.jpg +Places365_test_00165301.jpg +Places365_test_00165326.jpg +Places365_test_00165335.jpg +Places365_test_00165349.jpg +Places365_test_00165357.jpg +Places365_test_00165374.jpg +Places365_test_00165377.jpg +Places365_test_00165378.jpg +Places365_test_00165386.jpg +Places365_test_00165427.jpg +Places365_test_00165430.jpg +Places365_test_00165441.jpg +Places365_test_00165459.jpg +Places365_test_00165462.jpg +Places365_test_00165469.jpg +Places365_test_00165511.jpg +Places365_test_00165528.jpg +Places365_test_00165532.jpg +Places365_test_00165541.jpg +Places365_test_00165549.jpg +Places365_test_00165560.jpg +Places365_test_00165565.jpg +Places365_test_00165574.jpg +Places365_test_00165581.jpg +Places365_test_00165582.jpg +Places365_test_00165593.jpg +Places365_test_00165599.jpg +Places365_test_00165602.jpg +Places365_test_00165610.jpg +Places365_test_00165622.jpg +Places365_test_00165627.jpg 
+Places365_test_00165630.jpg +Places365_test_00165641.jpg +Places365_test_00165667.jpg +Places365_test_00165670.jpg +Places365_test_00165700.jpg +Places365_test_00165725.jpg +Places365_test_00165732.jpg +Places365_test_00165753.jpg +Places365_test_00165759.jpg +Places365_test_00165763.jpg +Places365_test_00165772.jpg +Places365_test_00165787.jpg +Places365_test_00165789.jpg +Places365_test_00165799.jpg +Places365_test_00165807.jpg +Places365_test_00165813.jpg +Places365_test_00165849.jpg +Places365_test_00165863.jpg +Places365_test_00165869.jpg +Places365_test_00165874.jpg +Places365_test_00165882.jpg +Places365_test_00165887.jpg +Places365_test_00165898.jpg +Places365_test_00165939.jpg +Places365_test_00165954.jpg +Places365_test_00165974.jpg +Places365_test_00165976.jpg +Places365_test_00165982.jpg +Places365_test_00165988.jpg +Places365_test_00165993.jpg +Places365_test_00166027.jpg +Places365_test_00166040.jpg +Places365_test_00166042.jpg +Places365_test_00166066.jpg +Places365_test_00166101.jpg +Places365_test_00166102.jpg +Places365_test_00166114.jpg +Places365_test_00166115.jpg +Places365_test_00166121.jpg +Places365_test_00166125.jpg +Places365_test_00166126.jpg +Places365_test_00166141.jpg +Places365_test_00166166.jpg +Places365_test_00166178.jpg +Places365_test_00166179.jpg +Places365_test_00166192.jpg +Places365_test_00166211.jpg +Places365_test_00166212.jpg +Places365_test_00166221.jpg +Places365_test_00166225.jpg +Places365_test_00166249.jpg +Places365_test_00166250.jpg +Places365_test_00166254.jpg +Places365_test_00166258.jpg +Places365_test_00166299.jpg +Places365_test_00166311.jpg +Places365_test_00166322.jpg +Places365_test_00166345.jpg +Places365_test_00166347.jpg +Places365_test_00166348.jpg +Places365_test_00166350.jpg +Places365_test_00166355.jpg +Places365_test_00166363.jpg +Places365_test_00166365.jpg +Places365_test_00166367.jpg +Places365_test_00166368.jpg +Places365_test_00166374.jpg +Places365_test_00166379.jpg +Places365_test_00166395.jpg +Places365_test_00166412.jpg +Places365_test_00166432.jpg +Places365_test_00166450.jpg +Places365_test_00166457.jpg +Places365_test_00166461.jpg +Places365_test_00166479.jpg +Places365_test_00166497.jpg +Places365_test_00166505.jpg +Places365_test_00166527.jpg +Places365_test_00166538.jpg +Places365_test_00166561.jpg +Places365_test_00166579.jpg +Places365_test_00166595.jpg +Places365_test_00166600.jpg +Places365_test_00166606.jpg +Places365_test_00166614.jpg +Places365_test_00166619.jpg +Places365_test_00166620.jpg +Places365_test_00166633.jpg +Places365_test_00166649.jpg +Places365_test_00166653.jpg +Places365_test_00166659.jpg +Places365_test_00166665.jpg +Places365_test_00166670.jpg +Places365_test_00166687.jpg +Places365_test_00166703.jpg +Places365_test_00166723.jpg +Places365_test_00166730.jpg +Places365_test_00166734.jpg +Places365_test_00166742.jpg +Places365_test_00166744.jpg +Places365_test_00166757.jpg +Places365_test_00166762.jpg +Places365_test_00166772.jpg +Places365_test_00166773.jpg +Places365_test_00166781.jpg +Places365_test_00166787.jpg +Places365_test_00166795.jpg +Places365_test_00166804.jpg +Places365_test_00166825.jpg +Places365_test_00166844.jpg +Places365_test_00166846.jpg +Places365_test_00166866.jpg +Places365_test_00166871.jpg +Places365_test_00166887.jpg +Places365_test_00166900.jpg +Places365_test_00166902.jpg +Places365_test_00166915.jpg +Places365_test_00166924.jpg +Places365_test_00166955.jpg +Places365_test_00166957.jpg +Places365_test_00167001.jpg +Places365_test_00167011.jpg 
+Places365_test_00167018.jpg +Places365_test_00167034.jpg +Places365_test_00167072.jpg +Places365_test_00167075.jpg +Places365_test_00167079.jpg +Places365_test_00167082.jpg +Places365_test_00167083.jpg +Places365_test_00167091.jpg +Places365_test_00167106.jpg +Places365_test_00167110.jpg +Places365_test_00167122.jpg +Places365_test_00167130.jpg +Places365_test_00167160.jpg +Places365_test_00167170.jpg +Places365_test_00167183.jpg +Places365_test_00167191.jpg +Places365_test_00167217.jpg +Places365_test_00167239.jpg +Places365_test_00167253.jpg +Places365_test_00167262.jpg +Places365_test_00167281.jpg +Places365_test_00167297.jpg +Places365_test_00167309.jpg +Places365_test_00167316.jpg +Places365_test_00167340.jpg +Places365_test_00167349.jpg +Places365_test_00167350.jpg +Places365_test_00167357.jpg +Places365_test_00167359.jpg +Places365_test_00167374.jpg +Places365_test_00167412.jpg +Places365_test_00167427.jpg +Places365_test_00167429.jpg +Places365_test_00167435.jpg +Places365_test_00167442.jpg +Places365_test_00167451.jpg +Places365_test_00167457.jpg +Places365_test_00167465.jpg +Places365_test_00167473.jpg +Places365_test_00167519.jpg +Places365_test_00167540.jpg +Places365_test_00167543.jpg +Places365_test_00167547.jpg +Places365_test_00167551.jpg +Places365_test_00167553.jpg +Places365_test_00167557.jpg +Places365_test_00167568.jpg +Places365_test_00167585.jpg +Places365_test_00167588.jpg +Places365_test_00167597.jpg +Places365_test_00167606.jpg +Places365_test_00167636.jpg +Places365_test_00167637.jpg +Places365_test_00167658.jpg +Places365_test_00167660.jpg +Places365_test_00167681.jpg +Places365_test_00167688.jpg +Places365_test_00167690.jpg +Places365_test_00167698.jpg +Places365_test_00167705.jpg +Places365_test_00167712.jpg +Places365_test_00167727.jpg +Places365_test_00167762.jpg +Places365_test_00167772.jpg +Places365_test_00167780.jpg +Places365_test_00167784.jpg +Places365_test_00167785.jpg +Places365_test_00167788.jpg +Places365_test_00167791.jpg +Places365_test_00167833.jpg +Places365_test_00167843.jpg +Places365_test_00167864.jpg +Places365_test_00167898.jpg +Places365_test_00167906.jpg +Places365_test_00167914.jpg +Places365_test_00167918.jpg +Places365_test_00167929.jpg +Places365_test_00167939.jpg +Places365_test_00167966.jpg +Places365_test_00167967.jpg +Places365_test_00167985.jpg +Places365_test_00168012.jpg +Places365_test_00168015.jpg +Places365_test_00168018.jpg +Places365_test_00168040.jpg +Places365_test_00168042.jpg +Places365_test_00168049.jpg +Places365_test_00168052.jpg +Places365_test_00168057.jpg +Places365_test_00168081.jpg +Places365_test_00168091.jpg +Places365_test_00168092.jpg +Places365_test_00168094.jpg +Places365_test_00168111.jpg +Places365_test_00168112.jpg +Places365_test_00168143.jpg +Places365_test_00168151.jpg +Places365_test_00168156.jpg +Places365_test_00168165.jpg +Places365_test_00168188.jpg +Places365_test_00168189.jpg +Places365_test_00168214.jpg +Places365_test_00168222.jpg +Places365_test_00168225.jpg +Places365_test_00168230.jpg +Places365_test_00168234.jpg +Places365_test_00168243.jpg +Places365_test_00168251.jpg +Places365_test_00168257.jpg +Places365_test_00168264.jpg +Places365_test_00168267.jpg +Places365_test_00168268.jpg +Places365_test_00168269.jpg +Places365_test_00168276.jpg +Places365_test_00168310.jpg +Places365_test_00168327.jpg +Places365_test_00168330.jpg +Places365_test_00168336.jpg +Places365_test_00168340.jpg +Places365_test_00168346.jpg +Places365_test_00168350.jpg +Places365_test_00168353.jpg 
+Places365_test_00168359.jpg +Places365_test_00168369.jpg +Places365_test_00168388.jpg +Places365_test_00168394.jpg +Places365_test_00168395.jpg +Places365_test_00168406.jpg +Places365_test_00168416.jpg +Places365_test_00168425.jpg +Places365_test_00168440.jpg +Places365_test_00168445.jpg +Places365_test_00168478.jpg +Places365_test_00168479.jpg +Places365_test_00168486.jpg +Places365_test_00168503.jpg +Places365_test_00168530.jpg +Places365_test_00168531.jpg +Places365_test_00168535.jpg +Places365_test_00168555.jpg +Places365_test_00168560.jpg +Places365_test_00168567.jpg +Places365_test_00168573.jpg +Places365_test_00168589.jpg +Places365_test_00168605.jpg +Places365_test_00168616.jpg +Places365_test_00168637.jpg +Places365_test_00168638.jpg +Places365_test_00168656.jpg +Places365_test_00168660.jpg +Places365_test_00168663.jpg +Places365_test_00168666.jpg +Places365_test_00168671.jpg +Places365_test_00168695.jpg +Places365_test_00168704.jpg +Places365_test_00168709.jpg +Places365_test_00168724.jpg +Places365_test_00168740.jpg +Places365_test_00168751.jpg +Places365_test_00168756.jpg +Places365_test_00168762.jpg +Places365_test_00168764.jpg +Places365_test_00168772.jpg +Places365_test_00168775.jpg +Places365_test_00168789.jpg +Places365_test_00168803.jpg +Places365_test_00168815.jpg +Places365_test_00168846.jpg +Places365_test_00168849.jpg +Places365_test_00168851.jpg +Places365_test_00168853.jpg +Places365_test_00168881.jpg +Places365_test_00168891.jpg +Places365_test_00168910.jpg +Places365_test_00168912.jpg +Places365_test_00168926.jpg +Places365_test_00168940.jpg +Places365_test_00168944.jpg +Places365_test_00168946.jpg +Places365_test_00168963.jpg +Places365_test_00168975.jpg +Places365_test_00168989.jpg +Places365_test_00168998.jpg +Places365_test_00169005.jpg +Places365_test_00169006.jpg +Places365_test_00169029.jpg +Places365_test_00169040.jpg +Places365_test_00169046.jpg +Places365_test_00169062.jpg +Places365_test_00169088.jpg +Places365_test_00169113.jpg +Places365_test_00169120.jpg +Places365_test_00169138.jpg +Places365_test_00169142.jpg +Places365_test_00169147.jpg +Places365_test_00169160.jpg +Places365_test_00169172.jpg +Places365_test_00169199.jpg +Places365_test_00169224.jpg +Places365_test_00169243.jpg +Places365_test_00169257.jpg +Places365_test_00169261.jpg +Places365_test_00169300.jpg +Places365_test_00169302.jpg +Places365_test_00169306.jpg +Places365_test_00169309.jpg +Places365_test_00169320.jpg +Places365_test_00169322.jpg +Places365_test_00169331.jpg +Places365_test_00169344.jpg +Places365_test_00169358.jpg +Places365_test_00169405.jpg +Places365_test_00169425.jpg +Places365_test_00169436.jpg +Places365_test_00169441.jpg +Places365_test_00169451.jpg +Places365_test_00169466.jpg +Places365_test_00169469.jpg +Places365_test_00169472.jpg +Places365_test_00169502.jpg +Places365_test_00169503.jpg +Places365_test_00169515.jpg +Places365_test_00169517.jpg +Places365_test_00169533.jpg +Places365_test_00169537.jpg +Places365_test_00169543.jpg +Places365_test_00169553.jpg +Places365_test_00169575.jpg +Places365_test_00169588.jpg +Places365_test_00169595.jpg +Places365_test_00169608.jpg +Places365_test_00169629.jpg +Places365_test_00169644.jpg +Places365_test_00169647.jpg +Places365_test_00169667.jpg +Places365_test_00169671.jpg +Places365_test_00169674.jpg +Places365_test_00169680.jpg +Places365_test_00169688.jpg +Places365_test_00169696.jpg +Places365_test_00169712.jpg +Places365_test_00169727.jpg +Places365_test_00169734.jpg +Places365_test_00169748.jpg 
+Places365_test_00169750.jpg +Places365_test_00169759.jpg +Places365_test_00169783.jpg +Places365_test_00169809.jpg +Places365_test_00169812.jpg +Places365_test_00169815.jpg +Places365_test_00169828.jpg +Places365_test_00169832.jpg +Places365_test_00169846.jpg +Places365_test_00169854.jpg +Places365_test_00169873.jpg +Places365_test_00169892.jpg +Places365_test_00169922.jpg +Places365_test_00169926.jpg +Places365_test_00169929.jpg +Places365_test_00169942.jpg +Places365_test_00169946.jpg +Places365_test_00169956.jpg +Places365_test_00169980.jpg +Places365_test_00169981.jpg +Places365_test_00169997.jpg +Places365_test_00170003.jpg +Places365_test_00170017.jpg +Places365_test_00170039.jpg +Places365_test_00170045.jpg +Places365_test_00170048.jpg +Places365_test_00170062.jpg +Places365_test_00170063.jpg +Places365_test_00170065.jpg +Places365_test_00170072.jpg +Places365_test_00170073.jpg +Places365_test_00170092.jpg +Places365_test_00170098.jpg +Places365_test_00170109.jpg +Places365_test_00170122.jpg +Places365_test_00170123.jpg +Places365_test_00170143.jpg +Places365_test_00170150.jpg +Places365_test_00170169.jpg +Places365_test_00170194.jpg +Places365_test_00170206.jpg +Places365_test_00170216.jpg +Places365_test_00170224.jpg +Places365_test_00170238.jpg +Places365_test_00170272.jpg +Places365_test_00170278.jpg +Places365_test_00170280.jpg +Places365_test_00170284.jpg +Places365_test_00170310.jpg +Places365_test_00170313.jpg +Places365_test_00170336.jpg +Places365_test_00170351.jpg +Places365_test_00170364.jpg +Places365_test_00170387.jpg +Places365_test_00170405.jpg +Places365_test_00170410.jpg +Places365_test_00170431.jpg +Places365_test_00170434.jpg +Places365_test_00170455.jpg +Places365_test_00170463.jpg +Places365_test_00170468.jpg +Places365_test_00170476.jpg +Places365_test_00170478.jpg +Places365_test_00170479.jpg +Places365_test_00170486.jpg +Places365_test_00170492.jpg +Places365_test_00170498.jpg +Places365_test_00170511.jpg +Places365_test_00170524.jpg +Places365_test_00170541.jpg +Places365_test_00170556.jpg +Places365_test_00170563.jpg +Places365_test_00170567.jpg +Places365_test_00170575.jpg +Places365_test_00170585.jpg +Places365_test_00170590.jpg +Places365_test_00170598.jpg +Places365_test_00170619.jpg +Places365_test_00170636.jpg +Places365_test_00170637.jpg +Places365_test_00170659.jpg +Places365_test_00170669.jpg +Places365_test_00170671.jpg +Places365_test_00170678.jpg +Places365_test_00170694.jpg +Places365_test_00170706.jpg +Places365_test_00170707.jpg +Places365_test_00170735.jpg +Places365_test_00170761.jpg +Places365_test_00170768.jpg +Places365_test_00170769.jpg +Places365_test_00170780.jpg +Places365_test_00170787.jpg +Places365_test_00170791.jpg +Places365_test_00170793.jpg +Places365_test_00170798.jpg +Places365_test_00170803.jpg +Places365_test_00170807.jpg +Places365_test_00170809.jpg +Places365_test_00170812.jpg +Places365_test_00170813.jpg +Places365_test_00170814.jpg +Places365_test_00170815.jpg +Places365_test_00170816.jpg +Places365_test_00170843.jpg +Places365_test_00170855.jpg +Places365_test_00170861.jpg +Places365_test_00170869.jpg +Places365_test_00170875.jpg +Places365_test_00170884.jpg +Places365_test_00170908.jpg +Places365_test_00170909.jpg +Places365_test_00170930.jpg +Places365_test_00170950.jpg +Places365_test_00170966.jpg +Places365_test_00170995.jpg +Places365_test_00171004.jpg +Places365_test_00171010.jpg +Places365_test_00171011.jpg +Places365_test_00171036.jpg +Places365_test_00171040.jpg +Places365_test_00171042.jpg 
+Places365_test_00171061.jpg +Places365_test_00171071.jpg +Places365_test_00171073.jpg +Places365_test_00171099.jpg +Places365_test_00171103.jpg +Places365_test_00171111.jpg +Places365_test_00171134.jpg +Places365_test_00171156.jpg +Places365_test_00171166.jpg +Places365_test_00171170.jpg +Places365_test_00171181.jpg +Places365_test_00171189.jpg +Places365_test_00171192.jpg +Places365_test_00171197.jpg +Places365_test_00171200.jpg +Places365_test_00171201.jpg +Places365_test_00171210.jpg +Places365_test_00171211.jpg +Places365_test_00171222.jpg +Places365_test_00171236.jpg +Places365_test_00171262.jpg +Places365_test_00171286.jpg +Places365_test_00171315.jpg +Places365_test_00171368.jpg +Places365_test_00171372.jpg +Places365_test_00171388.jpg +Places365_test_00171389.jpg +Places365_test_00171416.jpg +Places365_test_00171431.jpg +Places365_test_00171445.jpg +Places365_test_00171471.jpg +Places365_test_00171473.jpg +Places365_test_00171494.jpg +Places365_test_00171498.jpg +Places365_test_00171500.jpg +Places365_test_00171509.jpg +Places365_test_00171523.jpg +Places365_test_00171536.jpg +Places365_test_00171541.jpg +Places365_test_00171559.jpg +Places365_test_00171567.jpg +Places365_test_00171581.jpg +Places365_test_00171583.jpg +Places365_test_00171600.jpg +Places365_test_00171608.jpg +Places365_test_00171611.jpg +Places365_test_00171614.jpg +Places365_test_00171620.jpg +Places365_test_00171621.jpg +Places365_test_00171631.jpg +Places365_test_00171651.jpg +Places365_test_00171670.jpg +Places365_test_00171683.jpg +Places365_test_00171687.jpg +Places365_test_00171688.jpg +Places365_test_00171697.jpg +Places365_test_00171707.jpg +Places365_test_00171721.jpg +Places365_test_00171725.jpg +Places365_test_00171731.jpg +Places365_test_00171737.jpg +Places365_test_00171749.jpg +Places365_test_00171752.jpg +Places365_test_00171759.jpg +Places365_test_00171766.jpg +Places365_test_00171788.jpg +Places365_test_00171791.jpg +Places365_test_00171798.jpg +Places365_test_00171813.jpg +Places365_test_00171814.jpg +Places365_test_00171815.jpg +Places365_test_00171817.jpg +Places365_test_00171823.jpg +Places365_test_00171836.jpg +Places365_test_00171863.jpg +Places365_test_00171875.jpg +Places365_test_00171885.jpg +Places365_test_00171888.jpg +Places365_test_00171893.jpg +Places365_test_00171897.jpg +Places365_test_00171920.jpg +Places365_test_00171925.jpg +Places365_test_00171931.jpg +Places365_test_00171941.jpg +Places365_test_00171951.jpg +Places365_test_00171960.jpg +Places365_test_00171971.jpg +Places365_test_00171983.jpg +Places365_test_00171988.jpg +Places365_test_00172011.jpg +Places365_test_00172019.jpg +Places365_test_00172031.jpg +Places365_test_00172047.jpg +Places365_test_00172051.jpg +Places365_test_00172058.jpg +Places365_test_00172066.jpg +Places365_test_00172086.jpg +Places365_test_00172087.jpg +Places365_test_00172122.jpg +Places365_test_00172132.jpg +Places365_test_00172153.jpg +Places365_test_00172163.jpg +Places365_test_00172165.jpg +Places365_test_00172167.jpg +Places365_test_00172182.jpg +Places365_test_00172188.jpg +Places365_test_00172214.jpg +Places365_test_00172221.jpg +Places365_test_00172231.jpg +Places365_test_00172234.jpg +Places365_test_00172235.jpg +Places365_test_00172246.jpg +Places365_test_00172257.jpg +Places365_test_00172266.jpg +Places365_test_00172281.jpg +Places365_test_00172288.jpg +Places365_test_00172292.jpg +Places365_test_00172331.jpg +Places365_test_00172334.jpg +Places365_test_00172341.jpg +Places365_test_00172354.jpg +Places365_test_00172359.jpg 
+Places365_test_00172367.jpg +Places365_test_00172371.jpg +Places365_test_00172372.jpg +Places365_test_00172383.jpg +Places365_test_00172393.jpg +Places365_test_00172398.jpg +Places365_test_00172409.jpg +Places365_test_00172416.jpg +Places365_test_00172417.jpg +Places365_test_00172418.jpg +Places365_test_00172432.jpg +Places365_test_00172440.jpg +Places365_test_00172451.jpg +Places365_test_00172456.jpg +Places365_test_00172495.jpg +Places365_test_00172514.jpg +Places365_test_00172529.jpg +Places365_test_00172562.jpg +Places365_test_00172567.jpg +Places365_test_00172573.jpg +Places365_test_00172575.jpg +Places365_test_00172588.jpg +Places365_test_00172599.jpg +Places365_test_00172609.jpg +Places365_test_00172611.jpg +Places365_test_00172623.jpg +Places365_test_00172626.jpg +Places365_test_00172637.jpg +Places365_test_00172666.jpg +Places365_test_00172667.jpg +Places365_test_00172678.jpg +Places365_test_00172721.jpg +Places365_test_00172731.jpg +Places365_test_00172735.jpg +Places365_test_00172744.jpg +Places365_test_00172755.jpg +Places365_test_00172781.jpg +Places365_test_00172784.jpg +Places365_test_00172790.jpg +Places365_test_00172814.jpg +Places365_test_00172818.jpg +Places365_test_00172820.jpg +Places365_test_00172825.jpg +Places365_test_00172831.jpg +Places365_test_00172841.jpg +Places365_test_00172852.jpg +Places365_test_00172862.jpg +Places365_test_00172876.jpg +Places365_test_00172891.jpg +Places365_test_00172900.jpg +Places365_test_00172902.jpg +Places365_test_00172922.jpg +Places365_test_00172935.jpg +Places365_test_00172941.jpg +Places365_test_00172948.jpg +Places365_test_00172956.jpg +Places365_test_00172960.jpg +Places365_test_00172961.jpg +Places365_test_00172967.jpg +Places365_test_00172982.jpg +Places365_test_00173007.jpg +Places365_test_00173012.jpg +Places365_test_00173016.jpg +Places365_test_00173025.jpg +Places365_test_00173049.jpg +Places365_test_00173053.jpg +Places365_test_00173054.jpg +Places365_test_00173071.jpg +Places365_test_00173074.jpg +Places365_test_00173078.jpg +Places365_test_00173085.jpg +Places365_test_00173093.jpg +Places365_test_00173094.jpg +Places365_test_00173096.jpg +Places365_test_00173114.jpg +Places365_test_00173125.jpg +Places365_test_00173133.jpg +Places365_test_00173139.jpg +Places365_test_00173160.jpg +Places365_test_00173163.jpg +Places365_test_00173169.jpg +Places365_test_00173187.jpg +Places365_test_00173192.jpg +Places365_test_00173203.jpg +Places365_test_00173208.jpg +Places365_test_00173213.jpg +Places365_test_00173216.jpg +Places365_test_00173218.jpg +Places365_test_00173230.jpg +Places365_test_00173237.jpg +Places365_test_00173242.jpg +Places365_test_00173249.jpg +Places365_test_00173254.jpg +Places365_test_00173269.jpg +Places365_test_00173277.jpg +Places365_test_00173318.jpg +Places365_test_00173323.jpg +Places365_test_00173333.jpg +Places365_test_00173336.jpg +Places365_test_00173351.jpg +Places365_test_00173378.jpg +Places365_test_00173394.jpg +Places365_test_00173397.jpg +Places365_test_00173406.jpg +Places365_test_00173413.jpg +Places365_test_00173418.jpg +Places365_test_00173447.jpg +Places365_test_00173459.jpg +Places365_test_00173477.jpg +Places365_test_00173478.jpg +Places365_test_00173480.jpg +Places365_test_00173488.jpg +Places365_test_00173489.jpg +Places365_test_00173492.jpg +Places365_test_00173494.jpg +Places365_test_00173502.jpg +Places365_test_00173506.jpg +Places365_test_00173522.jpg +Places365_test_00173523.jpg +Places365_test_00173541.jpg +Places365_test_00173548.jpg +Places365_test_00173582.jpg 
+Places365_test_00173587.jpg +Places365_test_00173588.jpg +Places365_test_00173607.jpg +Places365_test_00173611.jpg +Places365_test_00173617.jpg +Places365_test_00173650.jpg +Places365_test_00173653.jpg +Places365_test_00173672.jpg +Places365_test_00173675.jpg +Places365_test_00173714.jpg +Places365_test_00173717.jpg +Places365_test_00173720.jpg +Places365_test_00173732.jpg +Places365_test_00173752.jpg +Places365_test_00173773.jpg +Places365_test_00173794.jpg +Places365_test_00173801.jpg +Places365_test_00173815.jpg +Places365_test_00173828.jpg +Places365_test_00173835.jpg +Places365_test_00173848.jpg +Places365_test_00173872.jpg +Places365_test_00173895.jpg +Places365_test_00173925.jpg +Places365_test_00173945.jpg +Places365_test_00173950.jpg +Places365_test_00173951.jpg +Places365_test_00173960.jpg +Places365_test_00173974.jpg +Places365_test_00173991.jpg +Places365_test_00173993.jpg +Places365_test_00174033.jpg +Places365_test_00174058.jpg +Places365_test_00174075.jpg +Places365_test_00174078.jpg +Places365_test_00174103.jpg +Places365_test_00174107.jpg +Places365_test_00174122.jpg +Places365_test_00174131.jpg +Places365_test_00174133.jpg +Places365_test_00174140.jpg +Places365_test_00174144.jpg +Places365_test_00174158.jpg +Places365_test_00174159.jpg +Places365_test_00174166.jpg +Places365_test_00174169.jpg +Places365_test_00174170.jpg +Places365_test_00174183.jpg +Places365_test_00174191.jpg +Places365_test_00174195.jpg +Places365_test_00174202.jpg +Places365_test_00174217.jpg +Places365_test_00174221.jpg +Places365_test_00174253.jpg +Places365_test_00174255.jpg +Places365_test_00174262.jpg +Places365_test_00174272.jpg +Places365_test_00174284.jpg +Places365_test_00174290.jpg +Places365_test_00174291.jpg +Places365_test_00174316.jpg +Places365_test_00174321.jpg +Places365_test_00174324.jpg +Places365_test_00174334.jpg +Places365_test_00174354.jpg +Places365_test_00174363.jpg +Places365_test_00174373.jpg +Places365_test_00174398.jpg +Places365_test_00174418.jpg +Places365_test_00174426.jpg +Places365_test_00174435.jpg +Places365_test_00174439.jpg +Places365_test_00174449.jpg +Places365_test_00174450.jpg +Places365_test_00174470.jpg +Places365_test_00174512.jpg +Places365_test_00174516.jpg +Places365_test_00174538.jpg +Places365_test_00174578.jpg +Places365_test_00174593.jpg +Places365_test_00174610.jpg +Places365_test_00174646.jpg +Places365_test_00174657.jpg +Places365_test_00174663.jpg +Places365_test_00174664.jpg +Places365_test_00174700.jpg +Places365_test_00174719.jpg +Places365_test_00174744.jpg +Places365_test_00174745.jpg +Places365_test_00174751.jpg +Places365_test_00174765.jpg +Places365_test_00174766.jpg +Places365_test_00174772.jpg +Places365_test_00174791.jpg +Places365_test_00174793.jpg +Places365_test_00174806.jpg +Places365_test_00174814.jpg +Places365_test_00174825.jpg +Places365_test_00174829.jpg +Places365_test_00174838.jpg +Places365_test_00174845.jpg +Places365_test_00174846.jpg +Places365_test_00174893.jpg +Places365_test_00174895.jpg +Places365_test_00174909.jpg +Places365_test_00174919.jpg +Places365_test_00174922.jpg +Places365_test_00174925.jpg +Places365_test_00174939.jpg +Places365_test_00174946.jpg +Places365_test_00174973.jpg +Places365_test_00175011.jpg +Places365_test_00175020.jpg +Places365_test_00175021.jpg +Places365_test_00175030.jpg +Places365_test_00175031.jpg +Places365_test_00175041.jpg +Places365_test_00175045.jpg +Places365_test_00175055.jpg +Places365_test_00175061.jpg +Places365_test_00175065.jpg +Places365_test_00175067.jpg 
+Places365_test_00175073.jpg +Places365_test_00175093.jpg +Places365_test_00175096.jpg +Places365_test_00175159.jpg +Places365_test_00175162.jpg +Places365_test_00175164.jpg +Places365_test_00175171.jpg +Places365_test_00175175.jpg +Places365_test_00175208.jpg +Places365_test_00175219.jpg +Places365_test_00175221.jpg +Places365_test_00175222.jpg +Places365_test_00175234.jpg +Places365_test_00175238.jpg +Places365_test_00175241.jpg +Places365_test_00175246.jpg +Places365_test_00175258.jpg +Places365_test_00175275.jpg +Places365_test_00175282.jpg +Places365_test_00175285.jpg +Places365_test_00175286.jpg +Places365_test_00175295.jpg +Places365_test_00175302.jpg +Places365_test_00175310.jpg +Places365_test_00175319.jpg +Places365_test_00175335.jpg +Places365_test_00175350.jpg +Places365_test_00175387.jpg +Places365_test_00175395.jpg +Places365_test_00175397.jpg +Places365_test_00175404.jpg +Places365_test_00175411.jpg +Places365_test_00175417.jpg +Places365_test_00175422.jpg +Places365_test_00175427.jpg +Places365_test_00175429.jpg +Places365_test_00175441.jpg +Places365_test_00175487.jpg +Places365_test_00175506.jpg +Places365_test_00175519.jpg +Places365_test_00175553.jpg +Places365_test_00175558.jpg +Places365_test_00175576.jpg +Places365_test_00175579.jpg +Places365_test_00175585.jpg +Places365_test_00175606.jpg +Places365_test_00175610.jpg +Places365_test_00175622.jpg +Places365_test_00175626.jpg +Places365_test_00175627.jpg +Places365_test_00175636.jpg +Places365_test_00175657.jpg +Places365_test_00175679.jpg +Places365_test_00175690.jpg +Places365_test_00175700.jpg +Places365_test_00175742.jpg +Places365_test_00175744.jpg +Places365_test_00175769.jpg +Places365_test_00175772.jpg +Places365_test_00175801.jpg +Places365_test_00175803.jpg +Places365_test_00175806.jpg +Places365_test_00175809.jpg +Places365_test_00175812.jpg +Places365_test_00175819.jpg +Places365_test_00175828.jpg +Places365_test_00175847.jpg +Places365_test_00175862.jpg +Places365_test_00175872.jpg +Places365_test_00175884.jpg +Places365_test_00175886.jpg +Places365_test_00175901.jpg +Places365_test_00175908.jpg +Places365_test_00175910.jpg +Places365_test_00175927.jpg +Places365_test_00175928.jpg +Places365_test_00175938.jpg +Places365_test_00175944.jpg +Places365_test_00175973.jpg +Places365_test_00175982.jpg +Places365_test_00175984.jpg +Places365_test_00175989.jpg +Places365_test_00176003.jpg +Places365_test_00176005.jpg +Places365_test_00176007.jpg +Places365_test_00176011.jpg +Places365_test_00176034.jpg +Places365_test_00176038.jpg +Places365_test_00176043.jpg +Places365_test_00176049.jpg +Places365_test_00176068.jpg +Places365_test_00176071.jpg +Places365_test_00176074.jpg +Places365_test_00176084.jpg +Places365_test_00176086.jpg +Places365_test_00176088.jpg +Places365_test_00176091.jpg +Places365_test_00176097.jpg +Places365_test_00176113.jpg +Places365_test_00176122.jpg +Places365_test_00176127.jpg +Places365_test_00176133.jpg +Places365_test_00176135.jpg +Places365_test_00176147.jpg +Places365_test_00176154.jpg +Places365_test_00176156.jpg +Places365_test_00176160.jpg +Places365_test_00176164.jpg +Places365_test_00176168.jpg +Places365_test_00176169.jpg +Places365_test_00176182.jpg +Places365_test_00176230.jpg +Places365_test_00176241.jpg +Places365_test_00176264.jpg +Places365_test_00176268.jpg +Places365_test_00176276.jpg +Places365_test_00176278.jpg +Places365_test_00176292.jpg +Places365_test_00176305.jpg +Places365_test_00176324.jpg +Places365_test_00176331.jpg +Places365_test_00176381.jpg 
+Places365_test_00176386.jpg +Places365_test_00176412.jpg +Places365_test_00176420.jpg +Places365_test_00176425.jpg +Places365_test_00176428.jpg +Places365_test_00176432.jpg +Places365_test_00176433.jpg +Places365_test_00176452.jpg +Places365_test_00176478.jpg +Places365_test_00176482.jpg +Places365_test_00176502.jpg +Places365_test_00176510.jpg +Places365_test_00176524.jpg +Places365_test_00176529.jpg +Places365_test_00176530.jpg +Places365_test_00176535.jpg +Places365_test_00176537.jpg +Places365_test_00176555.jpg +Places365_test_00176573.jpg +Places365_test_00176588.jpg +Places365_test_00176618.jpg +Places365_test_00176632.jpg +Places365_test_00176636.jpg +Places365_test_00176643.jpg +Places365_test_00176649.jpg +Places365_test_00176654.jpg +Places365_test_00176689.jpg +Places365_test_00176696.jpg +Places365_test_00176716.jpg +Places365_test_00176717.jpg +Places365_test_00176747.jpg +Places365_test_00176764.jpg +Places365_test_00176769.jpg +Places365_test_00176776.jpg +Places365_test_00176805.jpg +Places365_test_00176812.jpg +Places365_test_00176815.jpg +Places365_test_00176824.jpg +Places365_test_00176827.jpg +Places365_test_00176832.jpg +Places365_test_00176837.jpg +Places365_test_00176841.jpg +Places365_test_00176842.jpg +Places365_test_00176846.jpg +Places365_test_00176858.jpg +Places365_test_00176870.jpg +Places365_test_00176872.jpg +Places365_test_00176873.jpg +Places365_test_00176879.jpg +Places365_test_00176893.jpg +Places365_test_00176901.jpg +Places365_test_00176904.jpg +Places365_test_00176915.jpg +Places365_test_00176930.jpg +Places365_test_00176942.jpg +Places365_test_00176962.jpg +Places365_test_00176963.jpg +Places365_test_00176965.jpg +Places365_test_00176966.jpg +Places365_test_00176976.jpg +Places365_test_00176978.jpg +Places365_test_00176996.jpg +Places365_test_00176999.jpg +Places365_test_00177004.jpg +Places365_test_00177006.jpg +Places365_test_00177018.jpg +Places365_test_00177033.jpg +Places365_test_00177048.jpg +Places365_test_00177055.jpg +Places365_test_00177056.jpg +Places365_test_00177063.jpg +Places365_test_00177064.jpg +Places365_test_00177065.jpg +Places365_test_00177070.jpg +Places365_test_00177072.jpg +Places365_test_00177078.jpg +Places365_test_00177085.jpg +Places365_test_00177088.jpg +Places365_test_00177122.jpg +Places365_test_00177155.jpg +Places365_test_00177165.jpg +Places365_test_00177169.jpg +Places365_test_00177173.jpg +Places365_test_00177184.jpg +Places365_test_00177206.jpg +Places365_test_00177209.jpg +Places365_test_00177224.jpg +Places365_test_00177271.jpg +Places365_test_00177292.jpg +Places365_test_00177303.jpg +Places365_test_00177331.jpg +Places365_test_00177334.jpg +Places365_test_00177336.jpg +Places365_test_00177342.jpg +Places365_test_00177352.jpg +Places365_test_00177366.jpg +Places365_test_00177388.jpg +Places365_test_00177392.jpg +Places365_test_00177394.jpg +Places365_test_00177397.jpg +Places365_test_00177409.jpg +Places365_test_00177427.jpg +Places365_test_00177429.jpg +Places365_test_00177442.jpg +Places365_test_00177443.jpg +Places365_test_00177452.jpg +Places365_test_00177454.jpg +Places365_test_00177463.jpg +Places365_test_00177465.jpg +Places365_test_00177470.jpg +Places365_test_00177481.jpg +Places365_test_00177489.jpg +Places365_test_00177494.jpg +Places365_test_00177518.jpg +Places365_test_00177522.jpg +Places365_test_00177532.jpg +Places365_test_00177539.jpg +Places365_test_00177558.jpg +Places365_test_00177584.jpg +Places365_test_00177587.jpg +Places365_test_00177625.jpg +Places365_test_00177635.jpg 
+Places365_test_00177643.jpg +Places365_test_00177666.jpg +Places365_test_00177671.jpg +Places365_test_00177675.jpg +Places365_test_00177688.jpg +Places365_test_00177694.jpg +Places365_test_00177711.jpg +Places365_test_00177726.jpg +Places365_test_00177733.jpg +Places365_test_00177738.jpg +Places365_test_00177746.jpg +Places365_test_00177764.jpg +Places365_test_00177779.jpg +Places365_test_00177797.jpg +Places365_test_00177808.jpg +Places365_test_00177821.jpg +Places365_test_00177824.jpg +Places365_test_00177836.jpg +Places365_test_00177867.jpg +Places365_test_00177877.jpg +Places365_test_00177887.jpg +Places365_test_00177905.jpg +Places365_test_00177921.jpg +Places365_test_00177929.jpg +Places365_test_00177949.jpg +Places365_test_00177959.jpg +Places365_test_00177965.jpg +Places365_test_00177977.jpg +Places365_test_00177984.jpg +Places365_test_00177999.jpg +Places365_test_00178007.jpg +Places365_test_00178011.jpg +Places365_test_00178028.jpg +Places365_test_00178050.jpg +Places365_test_00178054.jpg +Places365_test_00178062.jpg +Places365_test_00178072.jpg +Places365_test_00178081.jpg +Places365_test_00178085.jpg +Places365_test_00178087.jpg +Places365_test_00178090.jpg +Places365_test_00178097.jpg +Places365_test_00178100.jpg +Places365_test_00178109.jpg +Places365_test_00178116.jpg +Places365_test_00178123.jpg +Places365_test_00178128.jpg +Places365_test_00178149.jpg +Places365_test_00178160.jpg +Places365_test_00178162.jpg +Places365_test_00178169.jpg +Places365_test_00178202.jpg +Places365_test_00178207.jpg +Places365_test_00178227.jpg +Places365_test_00178229.jpg +Places365_test_00178252.jpg +Places365_test_00178253.jpg +Places365_test_00178279.jpg +Places365_test_00178295.jpg +Places365_test_00178303.jpg +Places365_test_00178306.jpg +Places365_test_00178311.jpg +Places365_test_00178312.jpg +Places365_test_00178327.jpg +Places365_test_00178341.jpg +Places365_test_00178347.jpg +Places365_test_00178355.jpg +Places365_test_00178358.jpg +Places365_test_00178362.jpg +Places365_test_00178366.jpg +Places365_test_00178384.jpg +Places365_test_00178398.jpg +Places365_test_00178404.jpg +Places365_test_00178416.jpg +Places365_test_00178417.jpg +Places365_test_00178423.jpg +Places365_test_00178435.jpg +Places365_test_00178437.jpg +Places365_test_00178453.jpg +Places365_test_00178475.jpg +Places365_test_00178484.jpg +Places365_test_00178487.jpg +Places365_test_00178507.jpg +Places365_test_00178510.jpg +Places365_test_00178518.jpg +Places365_test_00178526.jpg +Places365_test_00178546.jpg +Places365_test_00178550.jpg +Places365_test_00178551.jpg +Places365_test_00178556.jpg +Places365_test_00178558.jpg +Places365_test_00178574.jpg +Places365_test_00178592.jpg +Places365_test_00178632.jpg +Places365_test_00178645.jpg +Places365_test_00178651.jpg +Places365_test_00178669.jpg +Places365_test_00178674.jpg +Places365_test_00178675.jpg +Places365_test_00178677.jpg +Places365_test_00178680.jpg +Places365_test_00178684.jpg +Places365_test_00178696.jpg +Places365_test_00178721.jpg +Places365_test_00178725.jpg +Places365_test_00178727.jpg +Places365_test_00178742.jpg +Places365_test_00178753.jpg +Places365_test_00178756.jpg +Places365_test_00178774.jpg +Places365_test_00178780.jpg +Places365_test_00178790.jpg +Places365_test_00178828.jpg +Places365_test_00178839.jpg +Places365_test_00178852.jpg +Places365_test_00178856.jpg +Places365_test_00178902.jpg +Places365_test_00178912.jpg +Places365_test_00178932.jpg +Places365_test_00178938.jpg +Places365_test_00178953.jpg +Places365_test_00178955.jpg 
+Places365_test_00178969.jpg +Places365_test_00178971.jpg +Places365_test_00178987.jpg +Places365_test_00178992.jpg +Places365_test_00178993.jpg +Places365_test_00179006.jpg +Places365_test_00179008.jpg +Places365_test_00179041.jpg +Places365_test_00179080.jpg +Places365_test_00179084.jpg +Places365_test_00179103.jpg +Places365_test_00179127.jpg +Places365_test_00179128.jpg +Places365_test_00179164.jpg +Places365_test_00179184.jpg +Places365_test_00179189.jpg +Places365_test_00179200.jpg +Places365_test_00179201.jpg +Places365_test_00179226.jpg +Places365_test_00179248.jpg +Places365_test_00179250.jpg +Places365_test_00179266.jpg +Places365_test_00179272.jpg +Places365_test_00179299.jpg +Places365_test_00179302.jpg +Places365_test_00179317.jpg +Places365_test_00179321.jpg +Places365_test_00179334.jpg +Places365_test_00179349.jpg +Places365_test_00179365.jpg +Places365_test_00179376.jpg +Places365_test_00179379.jpg +Places365_test_00179386.jpg +Places365_test_00179428.jpg +Places365_test_00179447.jpg +Places365_test_00179463.jpg +Places365_test_00179476.jpg +Places365_test_00179504.jpg +Places365_test_00179553.jpg +Places365_test_00179580.jpg +Places365_test_00179585.jpg +Places365_test_00179591.jpg +Places365_test_00179599.jpg +Places365_test_00179608.jpg +Places365_test_00179611.jpg +Places365_test_00179625.jpg +Places365_test_00179651.jpg +Places365_test_00179655.jpg +Places365_test_00179660.jpg +Places365_test_00179670.jpg +Places365_test_00179672.jpg +Places365_test_00179674.jpg +Places365_test_00179677.jpg +Places365_test_00179689.jpg +Places365_test_00179694.jpg +Places365_test_00179711.jpg +Places365_test_00179725.jpg +Places365_test_00179727.jpg +Places365_test_00179756.jpg +Places365_test_00179759.jpg +Places365_test_00179782.jpg +Places365_test_00179793.jpg +Places365_test_00179803.jpg +Places365_test_00179809.jpg +Places365_test_00179825.jpg +Places365_test_00179837.jpg +Places365_test_00179858.jpg +Places365_test_00179907.jpg +Places365_test_00179912.jpg +Places365_test_00179923.jpg +Places365_test_00179933.jpg +Places365_test_00179939.jpg +Places365_test_00179982.jpg +Places365_test_00179990.jpg +Places365_test_00180005.jpg +Places365_test_00180017.jpg +Places365_test_00180047.jpg +Places365_test_00180059.jpg +Places365_test_00180065.jpg +Places365_test_00180077.jpg +Places365_test_00180096.jpg +Places365_test_00180111.jpg +Places365_test_00180112.jpg +Places365_test_00180126.jpg +Places365_test_00180144.jpg +Places365_test_00180149.jpg +Places365_test_00180154.jpg +Places365_test_00180162.jpg +Places365_test_00180176.jpg +Places365_test_00180183.jpg +Places365_test_00180201.jpg +Places365_test_00180208.jpg +Places365_test_00180210.jpg +Places365_test_00180226.jpg +Places365_test_00180249.jpg +Places365_test_00180252.jpg +Places365_test_00180255.jpg +Places365_test_00180257.jpg +Places365_test_00180261.jpg +Places365_test_00180276.jpg +Places365_test_00180299.jpg +Places365_test_00180305.jpg +Places365_test_00180312.jpg +Places365_test_00180318.jpg +Places365_test_00180320.jpg +Places365_test_00180341.jpg +Places365_test_00180353.jpg +Places365_test_00180373.jpg +Places365_test_00180382.jpg +Places365_test_00180384.jpg +Places365_test_00180387.jpg +Places365_test_00180396.jpg +Places365_test_00180408.jpg +Places365_test_00180426.jpg +Places365_test_00180430.jpg +Places365_test_00180453.jpg +Places365_test_00180461.jpg +Places365_test_00180495.jpg +Places365_test_00180497.jpg +Places365_test_00180501.jpg +Places365_test_00180506.jpg +Places365_test_00180511.jpg 
+Places365_test_00180532.jpg +Places365_test_00180546.jpg +Places365_test_00180565.jpg +Places365_test_00180566.jpg +Places365_test_00180568.jpg +Places365_test_00180583.jpg +Places365_test_00180591.jpg +Places365_test_00180600.jpg +Places365_test_00180610.jpg +Places365_test_00180614.jpg +Places365_test_00180617.jpg +Places365_test_00180618.jpg +Places365_test_00180625.jpg +Places365_test_00180632.jpg +Places365_test_00180671.jpg +Places365_test_00180682.jpg +Places365_test_00180690.jpg +Places365_test_00180692.jpg +Places365_test_00180713.jpg +Places365_test_00180718.jpg +Places365_test_00180738.jpg +Places365_test_00180767.jpg +Places365_test_00180774.jpg +Places365_test_00180782.jpg +Places365_test_00180790.jpg +Places365_test_00180795.jpg +Places365_test_00180811.jpg +Places365_test_00180826.jpg +Places365_test_00180838.jpg +Places365_test_00180859.jpg +Places365_test_00180873.jpg +Places365_test_00180881.jpg +Places365_test_00180882.jpg +Places365_test_00180884.jpg +Places365_test_00180886.jpg +Places365_test_00180887.jpg +Places365_test_00180902.jpg +Places365_test_00180905.jpg +Places365_test_00180908.jpg +Places365_test_00180911.jpg +Places365_test_00180915.jpg +Places365_test_00180927.jpg +Places365_test_00180951.jpg +Places365_test_00180961.jpg +Places365_test_00180985.jpg +Places365_test_00181001.jpg +Places365_test_00181004.jpg +Places365_test_00181014.jpg +Places365_test_00181033.jpg +Places365_test_00181041.jpg +Places365_test_00181047.jpg +Places365_test_00181053.jpg +Places365_test_00181059.jpg +Places365_test_00181083.jpg +Places365_test_00181109.jpg +Places365_test_00181121.jpg +Places365_test_00181128.jpg +Places365_test_00181142.jpg +Places365_test_00181156.jpg +Places365_test_00181158.jpg +Places365_test_00181159.jpg +Places365_test_00181163.jpg +Places365_test_00181165.jpg +Places365_test_00181170.jpg +Places365_test_00181174.jpg +Places365_test_00181200.jpg +Places365_test_00181201.jpg +Places365_test_00181204.jpg +Places365_test_00181208.jpg +Places365_test_00181212.jpg +Places365_test_00181213.jpg +Places365_test_00181245.jpg +Places365_test_00181258.jpg +Places365_test_00181260.jpg +Places365_test_00181264.jpg +Places365_test_00181268.jpg +Places365_test_00181269.jpg +Places365_test_00181272.jpg +Places365_test_00181280.jpg +Places365_test_00181291.jpg +Places365_test_00181299.jpg +Places365_test_00181312.jpg +Places365_test_00181328.jpg +Places365_test_00181330.jpg +Places365_test_00181338.jpg +Places365_test_00181350.jpg +Places365_test_00181358.jpg +Places365_test_00181361.jpg +Places365_test_00181363.jpg +Places365_test_00181370.jpg +Places365_test_00181384.jpg +Places365_test_00181415.jpg +Places365_test_00181427.jpg +Places365_test_00181442.jpg +Places365_test_00181451.jpg +Places365_test_00181456.jpg +Places365_test_00181464.jpg +Places365_test_00181471.jpg +Places365_test_00181480.jpg +Places365_test_00181484.jpg +Places365_test_00181490.jpg +Places365_test_00181493.jpg +Places365_test_00181519.jpg +Places365_test_00181524.jpg +Places365_test_00181526.jpg +Places365_test_00181529.jpg +Places365_test_00181535.jpg +Places365_test_00181544.jpg +Places365_test_00181547.jpg +Places365_test_00181548.jpg +Places365_test_00181552.jpg +Places365_test_00181555.jpg +Places365_test_00181565.jpg +Places365_test_00181570.jpg +Places365_test_00181577.jpg +Places365_test_00181597.jpg +Places365_test_00181611.jpg +Places365_test_00181617.jpg +Places365_test_00181628.jpg +Places365_test_00181630.jpg +Places365_test_00181668.jpg +Places365_test_00181670.jpg 
+Places365_test_00181687.jpg +Places365_test_00181690.jpg +Places365_test_00181701.jpg +Places365_test_00181706.jpg +Places365_test_00181751.jpg +Places365_test_00181752.jpg +Places365_test_00181774.jpg +Places365_test_00181775.jpg +Places365_test_00181801.jpg +Places365_test_00181807.jpg +Places365_test_00181809.jpg +Places365_test_00181817.jpg +Places365_test_00181818.jpg +Places365_test_00181823.jpg +Places365_test_00181831.jpg +Places365_test_00181842.jpg +Places365_test_00181843.jpg +Places365_test_00181850.jpg +Places365_test_00181852.jpg +Places365_test_00181866.jpg +Places365_test_00181869.jpg +Places365_test_00181872.jpg +Places365_test_00181881.jpg +Places365_test_00181895.jpg +Places365_test_00181904.jpg +Places365_test_00181911.jpg +Places365_test_00181921.jpg +Places365_test_00181923.jpg +Places365_test_00181926.jpg +Places365_test_00181929.jpg +Places365_test_00181938.jpg +Places365_test_00181980.jpg +Places365_test_00181996.jpg +Places365_test_00182010.jpg +Places365_test_00182034.jpg +Places365_test_00182040.jpg +Places365_test_00182047.jpg +Places365_test_00182054.jpg +Places365_test_00182067.jpg +Places365_test_00182083.jpg +Places365_test_00182085.jpg +Places365_test_00182088.jpg +Places365_test_00182094.jpg +Places365_test_00182096.jpg +Places365_test_00182107.jpg +Places365_test_00182113.jpg +Places365_test_00182114.jpg +Places365_test_00182120.jpg +Places365_test_00182134.jpg +Places365_test_00182139.jpg +Places365_test_00182147.jpg +Places365_test_00182178.jpg +Places365_test_00182213.jpg +Places365_test_00182241.jpg +Places365_test_00182259.jpg +Places365_test_00182277.jpg +Places365_test_00182282.jpg +Places365_test_00182300.jpg +Places365_test_00182316.jpg +Places365_test_00182318.jpg +Places365_test_00182345.jpg +Places365_test_00182349.jpg +Places365_test_00182361.jpg +Places365_test_00182364.jpg +Places365_test_00182379.jpg +Places365_test_00182396.jpg +Places365_test_00182397.jpg +Places365_test_00182405.jpg +Places365_test_00182423.jpg +Places365_test_00182458.jpg +Places365_test_00182466.jpg +Places365_test_00182469.jpg +Places365_test_00182488.jpg +Places365_test_00182501.jpg +Places365_test_00182505.jpg +Places365_test_00182518.jpg +Places365_test_00182520.jpg +Places365_test_00182533.jpg +Places365_test_00182535.jpg +Places365_test_00182538.jpg +Places365_test_00182544.jpg +Places365_test_00182551.jpg +Places365_test_00182563.jpg +Places365_test_00182568.jpg +Places365_test_00182586.jpg +Places365_test_00182607.jpg +Places365_test_00182616.jpg +Places365_test_00182620.jpg +Places365_test_00182623.jpg +Places365_test_00182642.jpg +Places365_test_00182656.jpg +Places365_test_00182672.jpg +Places365_test_00182690.jpg +Places365_test_00182706.jpg +Places365_test_00182710.jpg +Places365_test_00182711.jpg +Places365_test_00182721.jpg +Places365_test_00182722.jpg +Places365_test_00182737.jpg +Places365_test_00182761.jpg +Places365_test_00182772.jpg +Places365_test_00182831.jpg +Places365_test_00182835.jpg +Places365_test_00182850.jpg +Places365_test_00182868.jpg +Places365_test_00182895.jpg +Places365_test_00182915.jpg +Places365_test_00182916.jpg +Places365_test_00182932.jpg +Places365_test_00182985.jpg +Places365_test_00182986.jpg +Places365_test_00182992.jpg +Places365_test_00183014.jpg +Places365_test_00183018.jpg +Places365_test_00183027.jpg +Places365_test_00183043.jpg +Places365_test_00183053.jpg +Places365_test_00183058.jpg +Places365_test_00183079.jpg +Places365_test_00183095.jpg +Places365_test_00183103.jpg +Places365_test_00183116.jpg 
+Places365_test_00183129.jpg +Places365_test_00183161.jpg +Places365_test_00183174.jpg +Places365_test_00183176.jpg +Places365_test_00183199.jpg +Places365_test_00183229.jpg +Places365_test_00183249.jpg +Places365_test_00183271.jpg +Places365_test_00183281.jpg +Places365_test_00183292.jpg +Places365_test_00183293.jpg +Places365_test_00183311.jpg +Places365_test_00183315.jpg +Places365_test_00183339.jpg +Places365_test_00183347.jpg +Places365_test_00183350.jpg +Places365_test_00183355.jpg +Places365_test_00183361.jpg +Places365_test_00183363.jpg +Places365_test_00183365.jpg +Places365_test_00183373.jpg +Places365_test_00183374.jpg +Places365_test_00183383.jpg +Places365_test_00183386.jpg +Places365_test_00183418.jpg +Places365_test_00183419.jpg +Places365_test_00183428.jpg +Places365_test_00183481.jpg +Places365_test_00183484.jpg +Places365_test_00183485.jpg +Places365_test_00183489.jpg +Places365_test_00183514.jpg +Places365_test_00183516.jpg +Places365_test_00183551.jpg +Places365_test_00183558.jpg +Places365_test_00183561.jpg +Places365_test_00183562.jpg +Places365_test_00183565.jpg +Places365_test_00183583.jpg +Places365_test_00183587.jpg +Places365_test_00183592.jpg +Places365_test_00183601.jpg +Places365_test_00183625.jpg +Places365_test_00183638.jpg +Places365_test_00183641.jpg +Places365_test_00183651.jpg +Places365_test_00183664.jpg +Places365_test_00183668.jpg +Places365_test_00183682.jpg +Places365_test_00183685.jpg +Places365_test_00183688.jpg +Places365_test_00183690.jpg +Places365_test_00183694.jpg +Places365_test_00183696.jpg +Places365_test_00183706.jpg +Places365_test_00183719.jpg +Places365_test_00183720.jpg +Places365_test_00183741.jpg +Places365_test_00183757.jpg +Places365_test_00183772.jpg +Places365_test_00183774.jpg +Places365_test_00183775.jpg +Places365_test_00183781.jpg +Places365_test_00183782.jpg +Places365_test_00183791.jpg +Places365_test_00183808.jpg +Places365_test_00183810.jpg +Places365_test_00183825.jpg +Places365_test_00183826.jpg +Places365_test_00183828.jpg +Places365_test_00183851.jpg +Places365_test_00183857.jpg +Places365_test_00183874.jpg +Places365_test_00183892.jpg +Places365_test_00183922.jpg +Places365_test_00183923.jpg +Places365_test_00183941.jpg +Places365_test_00183943.jpg +Places365_test_00183947.jpg +Places365_test_00183956.jpg +Places365_test_00183969.jpg +Places365_test_00183981.jpg +Places365_test_00183984.jpg +Places365_test_00184014.jpg +Places365_test_00184024.jpg +Places365_test_00184025.jpg +Places365_test_00184035.jpg +Places365_test_00184043.jpg +Places365_test_00184062.jpg +Places365_test_00184067.jpg +Places365_test_00184073.jpg +Places365_test_00184079.jpg +Places365_test_00184080.jpg +Places365_test_00184081.jpg +Places365_test_00184097.jpg +Places365_test_00184130.jpg +Places365_test_00184176.jpg +Places365_test_00184177.jpg +Places365_test_00184182.jpg +Places365_test_00184197.jpg +Places365_test_00184200.jpg +Places365_test_00184215.jpg +Places365_test_00184220.jpg +Places365_test_00184245.jpg +Places365_test_00184247.jpg +Places365_test_00184259.jpg +Places365_test_00184269.jpg +Places365_test_00184282.jpg +Places365_test_00184288.jpg +Places365_test_00184298.jpg +Places365_test_00184299.jpg +Places365_test_00184300.jpg +Places365_test_00184308.jpg +Places365_test_00184312.jpg +Places365_test_00184319.jpg +Places365_test_00184326.jpg +Places365_test_00184327.jpg +Places365_test_00184330.jpg +Places365_test_00184332.jpg +Places365_test_00184351.jpg +Places365_test_00184372.jpg +Places365_test_00184376.jpg 
+Places365_test_00184385.jpg +Places365_test_00184389.jpg +Places365_test_00184407.jpg +Places365_test_00184480.jpg +Places365_test_00184481.jpg +Places365_test_00184511.jpg +Places365_test_00184514.jpg +Places365_test_00184523.jpg +Places365_test_00184548.jpg +Places365_test_00184553.jpg +Places365_test_00184590.jpg +Places365_test_00184640.jpg +Places365_test_00184643.jpg +Places365_test_00184654.jpg +Places365_test_00184675.jpg +Places365_test_00184676.jpg +Places365_test_00184679.jpg +Places365_test_00184684.jpg +Places365_test_00184707.jpg +Places365_test_00184710.jpg +Places365_test_00184758.jpg +Places365_test_00184762.jpg +Places365_test_00184775.jpg +Places365_test_00184776.jpg +Places365_test_00184777.jpg +Places365_test_00184780.jpg +Places365_test_00184783.jpg +Places365_test_00184793.jpg +Places365_test_00184809.jpg +Places365_test_00184813.jpg +Places365_test_00184814.jpg +Places365_test_00184851.jpg +Places365_test_00184852.jpg +Places365_test_00184865.jpg +Places365_test_00184866.jpg +Places365_test_00184873.jpg +Places365_test_00184883.jpg +Places365_test_00184907.jpg +Places365_test_00184912.jpg +Places365_test_00184949.jpg +Places365_test_00184974.jpg +Places365_test_00184978.jpg +Places365_test_00184989.jpg +Places365_test_00185025.jpg +Places365_test_00185036.jpg +Places365_test_00185043.jpg +Places365_test_00185052.jpg +Places365_test_00185054.jpg +Places365_test_00185062.jpg +Places365_test_00185066.jpg +Places365_test_00185070.jpg +Places365_test_00185071.jpg +Places365_test_00185073.jpg +Places365_test_00185084.jpg +Places365_test_00185087.jpg +Places365_test_00185094.jpg +Places365_test_00185099.jpg +Places365_test_00185102.jpg +Places365_test_00185108.jpg +Places365_test_00185119.jpg +Places365_test_00185131.jpg +Places365_test_00185134.jpg +Places365_test_00185169.jpg +Places365_test_00185203.jpg +Places365_test_00185247.jpg +Places365_test_00185249.jpg +Places365_test_00185263.jpg +Places365_test_00185273.jpg +Places365_test_00185283.jpg +Places365_test_00185288.jpg +Places365_test_00185294.jpg +Places365_test_00185338.jpg +Places365_test_00185342.jpg +Places365_test_00185347.jpg +Places365_test_00185362.jpg +Places365_test_00185366.jpg +Places365_test_00185370.jpg +Places365_test_00185378.jpg +Places365_test_00185386.jpg +Places365_test_00185395.jpg +Places365_test_00185435.jpg +Places365_test_00185441.jpg +Places365_test_00185460.jpg +Places365_test_00185469.jpg +Places365_test_00185486.jpg +Places365_test_00185492.jpg +Places365_test_00185494.jpg +Places365_test_00185498.jpg +Places365_test_00185512.jpg +Places365_test_00185524.jpg +Places365_test_00185537.jpg +Places365_test_00185538.jpg +Places365_test_00185546.jpg +Places365_test_00185548.jpg +Places365_test_00185553.jpg +Places365_test_00185589.jpg +Places365_test_00185596.jpg +Places365_test_00185615.jpg +Places365_test_00185629.jpg +Places365_test_00185638.jpg +Places365_test_00185643.jpg +Places365_test_00185653.jpg +Places365_test_00185654.jpg +Places365_test_00185659.jpg +Places365_test_00185670.jpg +Places365_test_00185673.jpg +Places365_test_00185674.jpg +Places365_test_00185679.jpg +Places365_test_00185723.jpg +Places365_test_00185753.jpg +Places365_test_00185777.jpg +Places365_test_00185780.jpg +Places365_test_00185790.jpg +Places365_test_00185800.jpg +Places365_test_00185802.jpg +Places365_test_00185816.jpg +Places365_test_00185843.jpg +Places365_test_00185848.jpg +Places365_test_00185849.jpg +Places365_test_00185853.jpg +Places365_test_00185870.jpg +Places365_test_00185875.jpg 
+Places365_test_00185891.jpg +Places365_test_00185897.jpg +Places365_test_00185906.jpg +Places365_test_00185919.jpg +Places365_test_00185923.jpg +Places365_test_00185949.jpg +Places365_test_00185985.jpg +Places365_test_00186011.jpg +Places365_test_00186024.jpg +Places365_test_00186045.jpg +Places365_test_00186047.jpg +Places365_test_00186066.jpg +Places365_test_00186084.jpg +Places365_test_00186085.jpg +Places365_test_00186091.jpg +Places365_test_00186096.jpg +Places365_test_00186103.jpg +Places365_test_00186105.jpg +Places365_test_00186116.jpg +Places365_test_00186150.jpg +Places365_test_00186155.jpg +Places365_test_00186156.jpg +Places365_test_00186162.jpg +Places365_test_00186164.jpg +Places365_test_00186165.jpg +Places365_test_00186176.jpg +Places365_test_00186185.jpg +Places365_test_00186193.jpg +Places365_test_00186198.jpg +Places365_test_00186199.jpg +Places365_test_00186202.jpg +Places365_test_00186229.jpg +Places365_test_00186232.jpg +Places365_test_00186235.jpg +Places365_test_00186245.jpg +Places365_test_00186271.jpg +Places365_test_00186273.jpg +Places365_test_00186293.jpg +Places365_test_00186313.jpg +Places365_test_00186331.jpg +Places365_test_00186339.jpg +Places365_test_00186357.jpg +Places365_test_00186363.jpg +Places365_test_00186376.jpg +Places365_test_00186388.jpg +Places365_test_00186422.jpg +Places365_test_00186430.jpg +Places365_test_00186433.jpg +Places365_test_00186447.jpg +Places365_test_00186452.jpg +Places365_test_00186466.jpg +Places365_test_00186505.jpg +Places365_test_00186525.jpg +Places365_test_00186540.jpg +Places365_test_00186559.jpg +Places365_test_00186569.jpg +Places365_test_00186575.jpg +Places365_test_00186588.jpg +Places365_test_00186602.jpg +Places365_test_00186613.jpg +Places365_test_00186629.jpg +Places365_test_00186635.jpg +Places365_test_00186637.jpg +Places365_test_00186652.jpg +Places365_test_00186658.jpg +Places365_test_00186686.jpg +Places365_test_00186714.jpg +Places365_test_00186733.jpg +Places365_test_00186742.jpg +Places365_test_00186753.jpg +Places365_test_00186770.jpg +Places365_test_00186774.jpg +Places365_test_00186776.jpg +Places365_test_00186790.jpg +Places365_test_00186794.jpg +Places365_test_00186798.jpg +Places365_test_00186825.jpg +Places365_test_00186835.jpg +Places365_test_00186837.jpg +Places365_test_00186844.jpg +Places365_test_00186851.jpg +Places365_test_00186859.jpg +Places365_test_00186860.jpg +Places365_test_00186867.jpg +Places365_test_00186875.jpg +Places365_test_00186881.jpg +Places365_test_00186901.jpg +Places365_test_00186902.jpg +Places365_test_00186911.jpg +Places365_test_00186921.jpg +Places365_test_00186934.jpg +Places365_test_00186946.jpg +Places365_test_00186959.jpg +Places365_test_00186980.jpg +Places365_test_00186990.jpg +Places365_test_00187006.jpg +Places365_test_00187027.jpg +Places365_test_00187034.jpg +Places365_test_00187059.jpg +Places365_test_00187061.jpg +Places365_test_00187065.jpg +Places365_test_00187077.jpg +Places365_test_00187079.jpg +Places365_test_00187105.jpg +Places365_test_00187107.jpg +Places365_test_00187108.jpg +Places365_test_00187138.jpg +Places365_test_00187140.jpg +Places365_test_00187161.jpg +Places365_test_00187164.jpg +Places365_test_00187183.jpg +Places365_test_00187187.jpg +Places365_test_00187208.jpg +Places365_test_00187213.jpg +Places365_test_00187214.jpg +Places365_test_00187222.jpg +Places365_test_00187225.jpg +Places365_test_00187226.jpg +Places365_test_00187237.jpg +Places365_test_00187250.jpg +Places365_test_00187263.jpg +Places365_test_00187276.jpg 
+Places365_test_00187296.jpg +Places365_test_00187310.jpg +Places365_test_00187318.jpg +Places365_test_00187327.jpg +Places365_test_00187346.jpg +Places365_test_00187354.jpg +Places365_test_00187355.jpg +Places365_test_00187364.jpg +Places365_test_00187369.jpg +Places365_test_00187386.jpg +Places365_test_00187394.jpg +Places365_test_00187408.jpg +Places365_test_00187410.jpg +Places365_test_00187414.jpg +Places365_test_00187432.jpg +Places365_test_00187445.jpg +Places365_test_00187489.jpg +Places365_test_00187490.jpg +Places365_test_00187492.jpg +Places365_test_00187499.jpg +Places365_test_00187515.jpg +Places365_test_00187526.jpg +Places365_test_00187532.jpg +Places365_test_00187537.jpg +Places365_test_00187547.jpg +Places365_test_00187556.jpg +Places365_test_00187557.jpg +Places365_test_00187568.jpg +Places365_test_00187606.jpg +Places365_test_00187621.jpg +Places365_test_00187628.jpg +Places365_test_00187666.jpg +Places365_test_00187672.jpg +Places365_test_00187675.jpg +Places365_test_00187689.jpg +Places365_test_00187706.jpg +Places365_test_00187707.jpg +Places365_test_00187718.jpg +Places365_test_00187725.jpg +Places365_test_00187729.jpg +Places365_test_00187737.jpg +Places365_test_00187765.jpg +Places365_test_00187766.jpg +Places365_test_00187767.jpg +Places365_test_00187769.jpg +Places365_test_00187770.jpg +Places365_test_00187786.jpg +Places365_test_00187792.jpg +Places365_test_00187801.jpg +Places365_test_00187807.jpg +Places365_test_00187817.jpg +Places365_test_00187821.jpg +Places365_test_00187844.jpg +Places365_test_00187859.jpg +Places365_test_00187873.jpg +Places365_test_00187875.jpg +Places365_test_00187895.jpg +Places365_test_00187907.jpg +Places365_test_00187917.jpg +Places365_test_00187920.jpg +Places365_test_00187925.jpg +Places365_test_00187931.jpg +Places365_test_00187936.jpg +Places365_test_00187947.jpg +Places365_test_00187958.jpg +Places365_test_00187981.jpg +Places365_test_00187984.jpg +Places365_test_00187985.jpg +Places365_test_00187997.jpg +Places365_test_00187999.jpg +Places365_test_00188003.jpg +Places365_test_00188006.jpg +Places365_test_00188027.jpg +Places365_test_00188037.jpg +Places365_test_00188038.jpg +Places365_test_00188039.jpg +Places365_test_00188056.jpg +Places365_test_00188063.jpg +Places365_test_00188065.jpg +Places365_test_00188068.jpg +Places365_test_00188097.jpg +Places365_test_00188109.jpg +Places365_test_00188110.jpg +Places365_test_00188112.jpg +Places365_test_00188136.jpg +Places365_test_00188142.jpg +Places365_test_00188173.jpg +Places365_test_00188175.jpg +Places365_test_00188178.jpg +Places365_test_00188183.jpg +Places365_test_00188198.jpg +Places365_test_00188202.jpg +Places365_test_00188208.jpg +Places365_test_00188227.jpg +Places365_test_00188234.jpg +Places365_test_00188235.jpg +Places365_test_00188250.jpg +Places365_test_00188269.jpg +Places365_test_00188271.jpg +Places365_test_00188285.jpg +Places365_test_00188335.jpg +Places365_test_00188339.jpg +Places365_test_00188363.jpg +Places365_test_00188371.jpg +Places365_test_00188373.jpg +Places365_test_00188374.jpg +Places365_test_00188377.jpg +Places365_test_00188386.jpg +Places365_test_00188399.jpg +Places365_test_00188404.jpg +Places365_test_00188416.jpg +Places365_test_00188420.jpg +Places365_test_00188468.jpg +Places365_test_00188471.jpg +Places365_test_00188483.jpg +Places365_test_00188493.jpg +Places365_test_00188496.jpg +Places365_test_00188504.jpg +Places365_test_00188517.jpg +Places365_test_00188520.jpg +Places365_test_00188528.jpg +Places365_test_00188532.jpg 
+Places365_test_00188555.jpg +Places365_test_00188558.jpg +Places365_test_00188559.jpg +Places365_test_00188573.jpg +Places365_test_00188579.jpg +Places365_test_00188586.jpg +Places365_test_00188591.jpg +Places365_test_00188603.jpg +Places365_test_00188613.jpg +Places365_test_00188629.jpg +Places365_test_00188653.jpg +Places365_test_00188654.jpg +Places365_test_00188667.jpg +Places365_test_00188669.jpg +Places365_test_00188670.jpg +Places365_test_00188675.jpg +Places365_test_00188680.jpg +Places365_test_00188690.jpg +Places365_test_00188704.jpg +Places365_test_00188724.jpg +Places365_test_00188734.jpg +Places365_test_00188737.jpg +Places365_test_00188743.jpg +Places365_test_00188756.jpg +Places365_test_00188768.jpg +Places365_test_00188773.jpg +Places365_test_00188781.jpg +Places365_test_00188792.jpg +Places365_test_00188799.jpg +Places365_test_00188802.jpg +Places365_test_00188826.jpg +Places365_test_00188832.jpg +Places365_test_00188834.jpg +Places365_test_00188843.jpg +Places365_test_00188847.jpg +Places365_test_00188848.jpg +Places365_test_00188859.jpg +Places365_test_00188880.jpg +Places365_test_00188888.jpg +Places365_test_00188909.jpg +Places365_test_00188929.jpg +Places365_test_00188932.jpg +Places365_test_00188946.jpg +Places365_test_00188964.jpg +Places365_test_00188966.jpg +Places365_test_00188978.jpg +Places365_test_00188980.jpg +Places365_test_00188997.jpg +Places365_test_00188999.jpg +Places365_test_00189001.jpg +Places365_test_00189007.jpg +Places365_test_00189022.jpg +Places365_test_00189028.jpg +Places365_test_00189048.jpg +Places365_test_00189058.jpg +Places365_test_00189062.jpg +Places365_test_00189071.jpg +Places365_test_00189084.jpg +Places365_test_00189101.jpg +Places365_test_00189113.jpg +Places365_test_00189118.jpg +Places365_test_00189129.jpg +Places365_test_00189143.jpg +Places365_test_00189147.jpg +Places365_test_00189150.jpg +Places365_test_00189152.jpg +Places365_test_00189154.jpg +Places365_test_00189157.jpg +Places365_test_00189172.jpg +Places365_test_00189173.jpg +Places365_test_00189177.jpg +Places365_test_00189181.jpg +Places365_test_00189183.jpg +Places365_test_00189187.jpg +Places365_test_00189189.jpg +Places365_test_00189198.jpg +Places365_test_00189200.jpg +Places365_test_00189212.jpg +Places365_test_00189226.jpg +Places365_test_00189246.jpg +Places365_test_00189247.jpg +Places365_test_00189273.jpg +Places365_test_00189315.jpg +Places365_test_00189318.jpg +Places365_test_00189355.jpg +Places365_test_00189370.jpg +Places365_test_00189375.jpg +Places365_test_00189379.jpg +Places365_test_00189380.jpg +Places365_test_00189382.jpg +Places365_test_00189392.jpg +Places365_test_00189411.jpg +Places365_test_00189414.jpg +Places365_test_00189423.jpg +Places365_test_00189424.jpg +Places365_test_00189431.jpg +Places365_test_00189432.jpg +Places365_test_00189435.jpg +Places365_test_00189437.jpg +Places365_test_00189469.jpg +Places365_test_00189472.jpg +Places365_test_00189487.jpg +Places365_test_00189492.jpg +Places365_test_00189512.jpg +Places365_test_00189517.jpg +Places365_test_00189566.jpg +Places365_test_00189582.jpg +Places365_test_00189608.jpg +Places365_test_00189609.jpg +Places365_test_00189610.jpg +Places365_test_00189614.jpg +Places365_test_00189625.jpg +Places365_test_00189632.jpg +Places365_test_00189643.jpg +Places365_test_00189661.jpg +Places365_test_00189673.jpg +Places365_test_00189676.jpg +Places365_test_00189679.jpg +Places365_test_00189686.jpg +Places365_test_00189689.jpg +Places365_test_00189699.jpg +Places365_test_00189703.jpg 
+Places365_test_00189711.jpg +Places365_test_00189729.jpg +Places365_test_00189735.jpg +Places365_test_00189751.jpg +Places365_test_00189772.jpg +Places365_test_00189791.jpg +Places365_test_00189792.jpg +Places365_test_00189804.jpg +Places365_test_00189805.jpg +Places365_test_00189809.jpg +Places365_test_00189820.jpg +Places365_test_00189827.jpg +Places365_test_00189829.jpg +Places365_test_00189845.jpg +Places365_test_00189878.jpg +Places365_test_00189883.jpg +Places365_test_00189888.jpg +Places365_test_00189889.jpg +Places365_test_00189920.jpg +Places365_test_00189923.jpg +Places365_test_00189932.jpg +Places365_test_00189944.jpg +Places365_test_00189954.jpg +Places365_test_00189978.jpg +Places365_test_00189993.jpg +Places365_test_00190030.jpg +Places365_test_00190034.jpg +Places365_test_00190040.jpg +Places365_test_00190045.jpg +Places365_test_00190050.jpg +Places365_test_00190064.jpg +Places365_test_00190071.jpg +Places365_test_00190091.jpg +Places365_test_00190097.jpg +Places365_test_00190100.jpg +Places365_test_00190112.jpg +Places365_test_00190119.jpg +Places365_test_00190123.jpg +Places365_test_00190128.jpg +Places365_test_00190150.jpg +Places365_test_00190152.jpg +Places365_test_00190160.jpg +Places365_test_00190161.jpg +Places365_test_00190165.jpg +Places365_test_00190167.jpg +Places365_test_00190173.jpg +Places365_test_00190177.jpg +Places365_test_00190191.jpg +Places365_test_00190200.jpg +Places365_test_00190209.jpg +Places365_test_00190211.jpg +Places365_test_00190229.jpg +Places365_test_00190234.jpg +Places365_test_00190241.jpg +Places365_test_00190248.jpg +Places365_test_00190252.jpg +Places365_test_00190257.jpg +Places365_test_00190284.jpg +Places365_test_00190301.jpg +Places365_test_00190334.jpg +Places365_test_00190352.jpg +Places365_test_00190375.jpg +Places365_test_00190380.jpg +Places365_test_00190386.jpg +Places365_test_00190387.jpg +Places365_test_00190408.jpg +Places365_test_00190410.jpg +Places365_test_00190413.jpg +Places365_test_00190422.jpg +Places365_test_00190463.jpg +Places365_test_00190468.jpg +Places365_test_00190479.jpg +Places365_test_00190489.jpg +Places365_test_00190496.jpg +Places365_test_00190500.jpg +Places365_test_00190508.jpg +Places365_test_00190509.jpg +Places365_test_00190524.jpg +Places365_test_00190527.jpg +Places365_test_00190530.jpg +Places365_test_00190536.jpg +Places365_test_00190545.jpg +Places365_test_00190580.jpg +Places365_test_00190584.jpg +Places365_test_00190588.jpg +Places365_test_00190595.jpg +Places365_test_00190598.jpg +Places365_test_00190604.jpg +Places365_test_00190606.jpg +Places365_test_00190625.jpg +Places365_test_00190633.jpg +Places365_test_00190635.jpg +Places365_test_00190636.jpg +Places365_test_00190637.jpg +Places365_test_00190662.jpg +Places365_test_00190671.jpg +Places365_test_00190676.jpg +Places365_test_00190695.jpg +Places365_test_00190697.jpg +Places365_test_00190704.jpg +Places365_test_00190711.jpg +Places365_test_00190745.jpg +Places365_test_00190749.jpg +Places365_test_00190762.jpg +Places365_test_00190763.jpg +Places365_test_00190769.jpg +Places365_test_00190776.jpg +Places365_test_00190777.jpg +Places365_test_00190781.jpg +Places365_test_00190793.jpg +Places365_test_00190808.jpg +Places365_test_00190813.jpg +Places365_test_00190818.jpg +Places365_test_00190819.jpg +Places365_test_00190820.jpg +Places365_test_00190821.jpg +Places365_test_00190840.jpg +Places365_test_00190844.jpg +Places365_test_00190846.jpg +Places365_test_00190851.jpg +Places365_test_00190855.jpg +Places365_test_00190856.jpg 
+Places365_test_00190869.jpg +Places365_test_00190881.jpg +Places365_test_00190885.jpg +Places365_test_00190900.jpg +Places365_test_00190903.jpg +Places365_test_00190917.jpg +Places365_test_00190919.jpg +Places365_test_00190923.jpg +Places365_test_00190927.jpg +Places365_test_00190935.jpg +Places365_test_00190956.jpg +Places365_test_00190959.jpg +Places365_test_00190965.jpg +Places365_test_00190968.jpg +Places365_test_00190972.jpg +Places365_test_00190983.jpg +Places365_test_00190988.jpg +Places365_test_00190990.jpg +Places365_test_00190997.jpg +Places365_test_00191004.jpg +Places365_test_00191007.jpg +Places365_test_00191013.jpg +Places365_test_00191020.jpg +Places365_test_00191031.jpg +Places365_test_00191043.jpg +Places365_test_00191059.jpg +Places365_test_00191062.jpg +Places365_test_00191073.jpg +Places365_test_00191077.jpg +Places365_test_00191091.jpg +Places365_test_00191098.jpg +Places365_test_00191110.jpg +Places365_test_00191112.jpg +Places365_test_00191128.jpg +Places365_test_00191133.jpg +Places365_test_00191146.jpg +Places365_test_00191155.jpg +Places365_test_00191191.jpg +Places365_test_00191193.jpg +Places365_test_00191206.jpg +Places365_test_00191231.jpg +Places365_test_00191233.jpg +Places365_test_00191241.jpg +Places365_test_00191246.jpg +Places365_test_00191247.jpg +Places365_test_00191267.jpg +Places365_test_00191273.jpg +Places365_test_00191274.jpg +Places365_test_00191284.jpg +Places365_test_00191294.jpg +Places365_test_00191299.jpg +Places365_test_00191306.jpg +Places365_test_00191323.jpg +Places365_test_00191350.jpg +Places365_test_00191363.jpg +Places365_test_00191365.jpg +Places365_test_00191399.jpg +Places365_test_00191408.jpg +Places365_test_00191410.jpg +Places365_test_00191426.jpg +Places365_test_00191441.jpg +Places365_test_00191467.jpg +Places365_test_00191469.jpg +Places365_test_00191476.jpg +Places365_test_00191482.jpg +Places365_test_00191488.jpg +Places365_test_00191495.jpg +Places365_test_00191508.jpg +Places365_test_00191516.jpg +Places365_test_00191525.jpg +Places365_test_00191536.jpg +Places365_test_00191542.jpg +Places365_test_00191555.jpg +Places365_test_00191557.jpg +Places365_test_00191581.jpg +Places365_test_00191590.jpg +Places365_test_00191595.jpg +Places365_test_00191602.jpg +Places365_test_00191611.jpg +Places365_test_00191626.jpg +Places365_test_00191630.jpg +Places365_test_00191645.jpg +Places365_test_00191667.jpg +Places365_test_00191675.jpg +Places365_test_00191688.jpg +Places365_test_00191693.jpg +Places365_test_00191702.jpg +Places365_test_00191703.jpg +Places365_test_00191707.jpg +Places365_test_00191710.jpg +Places365_test_00191734.jpg +Places365_test_00191735.jpg +Places365_test_00191741.jpg +Places365_test_00191747.jpg +Places365_test_00191752.jpg +Places365_test_00191753.jpg +Places365_test_00191760.jpg +Places365_test_00191766.jpg +Places365_test_00191770.jpg +Places365_test_00191789.jpg +Places365_test_00191814.jpg +Places365_test_00191817.jpg +Places365_test_00191819.jpg +Places365_test_00191836.jpg +Places365_test_00191855.jpg +Places365_test_00191859.jpg +Places365_test_00191863.jpg +Places365_test_00191877.jpg +Places365_test_00191878.jpg +Places365_test_00191879.jpg +Places365_test_00191887.jpg +Places365_test_00191896.jpg +Places365_test_00191899.jpg +Places365_test_00191900.jpg +Places365_test_00191916.jpg +Places365_test_00191920.jpg +Places365_test_00191943.jpg +Places365_test_00191946.jpg +Places365_test_00191956.jpg +Places365_test_00191968.jpg +Places365_test_00191993.jpg +Places365_test_00192009.jpg 
+Places365_test_00192048.jpg +Places365_test_00192059.jpg +Places365_test_00192064.jpg +Places365_test_00192065.jpg +Places365_test_00192067.jpg +Places365_test_00192070.jpg +Places365_test_00192085.jpg +Places365_test_00192089.jpg +Places365_test_00192096.jpg +Places365_test_00192101.jpg +Places365_test_00192133.jpg +Places365_test_00192139.jpg +Places365_test_00192141.jpg +Places365_test_00192142.jpg +Places365_test_00192152.jpg +Places365_test_00192175.jpg +Places365_test_00192192.jpg +Places365_test_00192224.jpg +Places365_test_00192235.jpg +Places365_test_00192238.jpg +Places365_test_00192253.jpg +Places365_test_00192257.jpg +Places365_test_00192259.jpg +Places365_test_00192270.jpg +Places365_test_00192273.jpg +Places365_test_00192274.jpg +Places365_test_00192276.jpg +Places365_test_00192292.jpg +Places365_test_00192307.jpg +Places365_test_00192325.jpg +Places365_test_00192327.jpg +Places365_test_00192336.jpg +Places365_test_00192350.jpg +Places365_test_00192352.jpg +Places365_test_00192361.jpg +Places365_test_00192382.jpg +Places365_test_00192412.jpg +Places365_test_00192413.jpg +Places365_test_00192418.jpg +Places365_test_00192423.jpg +Places365_test_00192425.jpg +Places365_test_00192434.jpg +Places365_test_00192442.jpg +Places365_test_00192453.jpg +Places365_test_00192457.jpg +Places365_test_00192464.jpg +Places365_test_00192543.jpg +Places365_test_00192583.jpg +Places365_test_00192603.jpg +Places365_test_00192614.jpg +Places365_test_00192617.jpg +Places365_test_00192618.jpg +Places365_test_00192619.jpg +Places365_test_00192633.jpg +Places365_test_00192659.jpg +Places365_test_00192677.jpg +Places365_test_00192697.jpg +Places365_test_00192714.jpg +Places365_test_00192717.jpg +Places365_test_00192732.jpg +Places365_test_00192739.jpg +Places365_test_00192743.jpg +Places365_test_00192744.jpg +Places365_test_00192751.jpg +Places365_test_00192752.jpg +Places365_test_00192801.jpg +Places365_test_00192835.jpg +Places365_test_00192869.jpg +Places365_test_00192876.jpg +Places365_test_00192880.jpg +Places365_test_00192897.jpg +Places365_test_00192899.jpg +Places365_test_00192900.jpg +Places365_test_00192907.jpg +Places365_test_00192908.jpg +Places365_test_00192910.jpg +Places365_test_00192928.jpg +Places365_test_00192936.jpg +Places365_test_00192943.jpg +Places365_test_00192967.jpg +Places365_test_00192980.jpg +Places365_test_00192991.jpg +Places365_test_00192992.jpg +Places365_test_00192994.jpg +Places365_test_00193014.jpg +Places365_test_00193023.jpg +Places365_test_00193031.jpg +Places365_test_00193052.jpg +Places365_test_00193066.jpg +Places365_test_00193077.jpg +Places365_test_00193097.jpg +Places365_test_00193103.jpg +Places365_test_00193122.jpg +Places365_test_00193123.jpg +Places365_test_00193141.jpg +Places365_test_00193153.jpg +Places365_test_00193156.jpg +Places365_test_00193164.jpg +Places365_test_00193169.jpg +Places365_test_00193172.jpg +Places365_test_00193180.jpg +Places365_test_00193185.jpg +Places365_test_00193205.jpg +Places365_test_00193231.jpg +Places365_test_00193260.jpg +Places365_test_00193275.jpg +Places365_test_00193276.jpg +Places365_test_00193280.jpg +Places365_test_00193290.jpg +Places365_test_00193291.jpg +Places365_test_00193307.jpg +Places365_test_00193309.jpg +Places365_test_00193337.jpg +Places365_test_00193344.jpg +Places365_test_00193348.jpg +Places365_test_00193365.jpg +Places365_test_00193367.jpg +Places365_test_00193368.jpg +Places365_test_00193369.jpg +Places365_test_00193393.jpg +Places365_test_00193425.jpg +Places365_test_00193445.jpg 
+Places365_test_00193463.jpg +Places365_test_00193481.jpg +Places365_test_00193486.jpg +Places365_test_00193512.jpg +Places365_test_00193542.jpg +Places365_test_00193557.jpg +Places365_test_00193559.jpg +Places365_test_00193563.jpg +Places365_test_00193583.jpg +Places365_test_00193590.jpg +Places365_test_00193603.jpg +Places365_test_00193618.jpg +Places365_test_00193626.jpg +Places365_test_00193636.jpg +Places365_test_00193666.jpg +Places365_test_00193668.jpg +Places365_test_00193671.jpg +Places365_test_00193702.jpg +Places365_test_00193703.jpg +Places365_test_00193715.jpg +Places365_test_00193716.jpg +Places365_test_00193724.jpg +Places365_test_00193730.jpg +Places365_test_00193737.jpg +Places365_test_00193752.jpg +Places365_test_00193768.jpg +Places365_test_00193769.jpg +Places365_test_00193774.jpg +Places365_test_00193794.jpg +Places365_test_00193820.jpg +Places365_test_00193832.jpg +Places365_test_00193833.jpg +Places365_test_00193847.jpg +Places365_test_00193863.jpg +Places365_test_00193867.jpg +Places365_test_00193877.jpg +Places365_test_00193895.jpg +Places365_test_00193903.jpg +Places365_test_00193921.jpg +Places365_test_00193924.jpg +Places365_test_00193948.jpg +Places365_test_00193955.jpg +Places365_test_00193966.jpg +Places365_test_00193982.jpg +Places365_test_00193997.jpg +Places365_test_00194020.jpg +Places365_test_00194071.jpg +Places365_test_00194073.jpg +Places365_test_00194075.jpg +Places365_test_00194109.jpg +Places365_test_00194123.jpg +Places365_test_00194135.jpg +Places365_test_00194137.jpg +Places365_test_00194164.jpg +Places365_test_00194167.jpg +Places365_test_00194173.jpg +Places365_test_00194175.jpg +Places365_test_00194184.jpg +Places365_test_00194189.jpg +Places365_test_00194197.jpg +Places365_test_00194205.jpg +Places365_test_00194226.jpg +Places365_test_00194231.jpg +Places365_test_00194240.jpg +Places365_test_00194244.jpg +Places365_test_00194248.jpg +Places365_test_00194250.jpg +Places365_test_00194260.jpg +Places365_test_00194271.jpg +Places365_test_00194272.jpg +Places365_test_00194273.jpg +Places365_test_00194277.jpg +Places365_test_00194284.jpg +Places365_test_00194296.jpg +Places365_test_00194318.jpg +Places365_test_00194326.jpg +Places365_test_00194332.jpg +Places365_test_00194338.jpg +Places365_test_00194345.jpg +Places365_test_00194351.jpg +Places365_test_00194355.jpg +Places365_test_00194393.jpg +Places365_test_00194403.jpg +Places365_test_00194422.jpg +Places365_test_00194424.jpg +Places365_test_00194439.jpg +Places365_test_00194441.jpg +Places365_test_00194452.jpg +Places365_test_00194455.jpg +Places365_test_00194463.jpg +Places365_test_00194485.jpg +Places365_test_00194493.jpg +Places365_test_00194508.jpg +Places365_test_00194528.jpg +Places365_test_00194530.jpg +Places365_test_00194533.jpg +Places365_test_00194538.jpg +Places365_test_00194574.jpg +Places365_test_00194580.jpg +Places365_test_00194588.jpg +Places365_test_00194591.jpg +Places365_test_00194601.jpg +Places365_test_00194630.jpg +Places365_test_00194636.jpg +Places365_test_00194668.jpg +Places365_test_00194687.jpg +Places365_test_00194731.jpg +Places365_test_00194738.jpg +Places365_test_00194740.jpg +Places365_test_00194751.jpg +Places365_test_00194756.jpg +Places365_test_00194766.jpg +Places365_test_00194826.jpg +Places365_test_00194830.jpg +Places365_test_00194837.jpg +Places365_test_00194860.jpg +Places365_test_00194888.jpg +Places365_test_00194895.jpg +Places365_test_00194914.jpg +Places365_test_00194922.jpg +Places365_test_00194942.jpg +Places365_test_00194962.jpg 
+Places365_test_00194964.jpg +Places365_test_00194979.jpg +Places365_test_00194982.jpg +Places365_test_00194991.jpg +Places365_test_00195010.jpg +Places365_test_00195026.jpg +Places365_test_00195027.jpg +Places365_test_00195038.jpg +Places365_test_00195047.jpg +Places365_test_00195053.jpg +Places365_test_00195057.jpg +Places365_test_00195059.jpg +Places365_test_00195098.jpg +Places365_test_00195101.jpg +Places365_test_00195107.jpg +Places365_test_00195192.jpg +Places365_test_00195211.jpg +Places365_test_00195224.jpg +Places365_test_00195226.jpg +Places365_test_00195232.jpg +Places365_test_00195236.jpg +Places365_test_00195247.jpg +Places365_test_00195259.jpg +Places365_test_00195267.jpg +Places365_test_00195287.jpg +Places365_test_00195291.jpg +Places365_test_00195309.jpg +Places365_test_00195322.jpg +Places365_test_00195328.jpg +Places365_test_00195338.jpg +Places365_test_00195341.jpg +Places365_test_00195379.jpg +Places365_test_00195383.jpg +Places365_test_00195390.jpg +Places365_test_00195394.jpg +Places365_test_00195430.jpg +Places365_test_00195450.jpg +Places365_test_00195453.jpg +Places365_test_00195457.jpg +Places365_test_00195460.jpg +Places365_test_00195475.jpg +Places365_test_00195476.jpg +Places365_test_00195480.jpg +Places365_test_00195481.jpg +Places365_test_00195494.jpg +Places365_test_00195503.jpg +Places365_test_00195507.jpg +Places365_test_00195561.jpg +Places365_test_00195575.jpg +Places365_test_00195584.jpg +Places365_test_00195604.jpg +Places365_test_00195613.jpg +Places365_test_00195630.jpg +Places365_test_00195634.jpg +Places365_test_00195641.jpg +Places365_test_00195658.jpg +Places365_test_00195679.jpg +Places365_test_00195687.jpg +Places365_test_00195700.jpg +Places365_test_00195703.jpg +Places365_test_00195707.jpg +Places365_test_00195709.jpg +Places365_test_00195710.jpg +Places365_test_00195715.jpg +Places365_test_00195717.jpg +Places365_test_00195720.jpg +Places365_test_00195759.jpg +Places365_test_00195767.jpg +Places365_test_00195781.jpg +Places365_test_00195817.jpg +Places365_test_00195823.jpg +Places365_test_00195830.jpg +Places365_test_00195838.jpg +Places365_test_00195852.jpg +Places365_test_00195859.jpg +Places365_test_00195864.jpg +Places365_test_00195871.jpg +Places365_test_00195890.jpg +Places365_test_00195904.jpg +Places365_test_00195912.jpg +Places365_test_00195936.jpg +Places365_test_00195953.jpg +Places365_test_00195959.jpg +Places365_test_00195964.jpg +Places365_test_00195981.jpg +Places365_test_00195985.jpg +Places365_test_00196017.jpg +Places365_test_00196019.jpg +Places365_test_00196024.jpg +Places365_test_00196032.jpg +Places365_test_00196048.jpg +Places365_test_00196051.jpg +Places365_test_00196063.jpg +Places365_test_00196066.jpg +Places365_test_00196083.jpg +Places365_test_00196109.jpg +Places365_test_00196151.jpg +Places365_test_00196159.jpg +Places365_test_00196161.jpg +Places365_test_00196164.jpg +Places365_test_00196166.jpg +Places365_test_00196167.jpg +Places365_test_00196172.jpg +Places365_test_00196180.jpg +Places365_test_00196182.jpg +Places365_test_00196198.jpg +Places365_test_00196199.jpg +Places365_test_00196200.jpg +Places365_test_00196224.jpg +Places365_test_00196226.jpg +Places365_test_00196227.jpg +Places365_test_00196229.jpg +Places365_test_00196231.jpg +Places365_test_00196232.jpg +Places365_test_00196239.jpg +Places365_test_00196242.jpg +Places365_test_00196248.jpg +Places365_test_00196268.jpg +Places365_test_00196280.jpg +Places365_test_00196284.jpg +Places365_test_00196303.jpg +Places365_test_00196335.jpg 
+Places365_test_00196343.jpg +Places365_test_00196350.jpg +Places365_test_00196361.jpg +Places365_test_00196409.jpg +Places365_test_00196415.jpg +Places365_test_00196423.jpg +Places365_test_00196424.jpg +Places365_test_00196427.jpg +Places365_test_00196434.jpg +Places365_test_00196438.jpg +Places365_test_00196448.jpg +Places365_test_00196457.jpg +Places365_test_00196465.jpg +Places365_test_00196469.jpg +Places365_test_00196492.jpg +Places365_test_00196510.jpg +Places365_test_00196531.jpg +Places365_test_00196535.jpg +Places365_test_00196549.jpg +Places365_test_00196555.jpg +Places365_test_00196569.jpg +Places365_test_00196574.jpg +Places365_test_00196598.jpg +Places365_test_00196612.jpg +Places365_test_00196616.jpg +Places365_test_00196619.jpg +Places365_test_00196627.jpg +Places365_test_00196632.jpg +Places365_test_00196684.jpg +Places365_test_00196691.jpg +Places365_test_00196694.jpg +Places365_test_00196703.jpg +Places365_test_00196744.jpg +Places365_test_00196774.jpg +Places365_test_00196787.jpg +Places365_test_00196807.jpg +Places365_test_00196841.jpg +Places365_test_00196856.jpg +Places365_test_00196863.jpg +Places365_test_00196865.jpg +Places365_test_00196888.jpg +Places365_test_00196894.jpg +Places365_test_00196896.jpg +Places365_test_00196900.jpg +Places365_test_00196908.jpg +Places365_test_00196912.jpg +Places365_test_00196926.jpg +Places365_test_00196929.jpg +Places365_test_00196936.jpg +Places365_test_00196939.jpg +Places365_test_00196945.jpg +Places365_test_00196953.jpg +Places365_test_00196967.jpg +Places365_test_00196986.jpg +Places365_test_00197002.jpg +Places365_test_00197006.jpg +Places365_test_00197009.jpg +Places365_test_00197011.jpg +Places365_test_00197015.jpg +Places365_test_00197016.jpg +Places365_test_00197017.jpg +Places365_test_00197019.jpg +Places365_test_00197020.jpg +Places365_test_00197021.jpg +Places365_test_00197038.jpg +Places365_test_00197079.jpg +Places365_test_00197110.jpg +Places365_test_00197115.jpg +Places365_test_00197135.jpg +Places365_test_00197141.jpg +Places365_test_00197144.jpg +Places365_test_00197171.jpg +Places365_test_00197183.jpg +Places365_test_00197184.jpg +Places365_test_00197191.jpg +Places365_test_00197216.jpg +Places365_test_00197221.jpg +Places365_test_00197223.jpg +Places365_test_00197238.jpg +Places365_test_00197283.jpg +Places365_test_00197287.jpg +Places365_test_00197288.jpg +Places365_test_00197297.jpg +Places365_test_00197298.jpg +Places365_test_00197318.jpg +Places365_test_00197324.jpg +Places365_test_00197340.jpg +Places365_test_00197342.jpg +Places365_test_00197349.jpg +Places365_test_00197378.jpg +Places365_test_00197390.jpg +Places365_test_00197412.jpg +Places365_test_00197416.jpg +Places365_test_00197429.jpg +Places365_test_00197481.jpg +Places365_test_00197482.jpg +Places365_test_00197486.jpg +Places365_test_00197488.jpg +Places365_test_00197490.jpg +Places365_test_00197492.jpg +Places365_test_00197502.jpg +Places365_test_00197511.jpg +Places365_test_00197521.jpg +Places365_test_00197523.jpg +Places365_test_00197529.jpg +Places365_test_00197531.jpg +Places365_test_00197551.jpg +Places365_test_00197585.jpg +Places365_test_00197594.jpg +Places365_test_00197611.jpg +Places365_test_00197632.jpg +Places365_test_00197639.jpg +Places365_test_00197640.jpg +Places365_test_00197644.jpg +Places365_test_00197647.jpg +Places365_test_00197651.jpg +Places365_test_00197664.jpg +Places365_test_00197668.jpg +Places365_test_00197671.jpg +Places365_test_00197673.jpg +Places365_test_00197674.jpg +Places365_test_00197697.jpg 
+Places365_test_00197714.jpg +Places365_test_00197720.jpg +Places365_test_00197729.jpg +Places365_test_00197741.jpg +Places365_test_00197747.jpg +Places365_test_00197755.jpg +Places365_test_00197762.jpg +Places365_test_00197770.jpg +Places365_test_00197785.jpg +Places365_test_00197790.jpg +Places365_test_00197850.jpg +Places365_test_00197871.jpg +Places365_test_00197880.jpg +Places365_test_00197884.jpg +Places365_test_00197885.jpg +Places365_test_00197890.jpg +Places365_test_00197892.jpg +Places365_test_00197898.jpg +Places365_test_00197926.jpg +Places365_test_00197954.jpg +Places365_test_00197957.jpg +Places365_test_00197958.jpg +Places365_test_00197965.jpg +Places365_test_00197970.jpg +Places365_test_00197973.jpg +Places365_test_00198009.jpg +Places365_test_00198021.jpg +Places365_test_00198034.jpg +Places365_test_00198049.jpg +Places365_test_00198050.jpg +Places365_test_00198065.jpg +Places365_test_00198082.jpg +Places365_test_00198104.jpg +Places365_test_00198107.jpg +Places365_test_00198123.jpg +Places365_test_00198140.jpg +Places365_test_00198144.jpg +Places365_test_00198149.jpg +Places365_test_00198152.jpg +Places365_test_00198173.jpg +Places365_test_00198180.jpg +Places365_test_00198185.jpg +Places365_test_00198190.jpg +Places365_test_00198205.jpg +Places365_test_00198209.jpg +Places365_test_00198222.jpg +Places365_test_00198232.jpg +Places365_test_00198243.jpg +Places365_test_00198252.jpg +Places365_test_00198278.jpg +Places365_test_00198282.jpg +Places365_test_00198290.jpg +Places365_test_00198298.jpg +Places365_test_00198321.jpg +Places365_test_00198323.jpg +Places365_test_00198326.jpg +Places365_test_00198334.jpg +Places365_test_00198340.jpg +Places365_test_00198367.jpg +Places365_test_00198371.jpg +Places365_test_00198397.jpg +Places365_test_00198398.jpg +Places365_test_00198415.jpg +Places365_test_00198430.jpg +Places365_test_00198436.jpg +Places365_test_00198438.jpg +Places365_test_00198439.jpg +Places365_test_00198444.jpg +Places365_test_00198452.jpg +Places365_test_00198454.jpg +Places365_test_00198462.jpg +Places365_test_00198484.jpg +Places365_test_00198491.jpg +Places365_test_00198495.jpg +Places365_test_00198506.jpg +Places365_test_00198542.jpg +Places365_test_00198545.jpg +Places365_test_00198556.jpg +Places365_test_00198560.jpg +Places365_test_00198565.jpg +Places365_test_00198570.jpg +Places365_test_00198571.jpg +Places365_test_00198585.jpg +Places365_test_00198590.jpg +Places365_test_00198591.jpg +Places365_test_00198602.jpg +Places365_test_00198608.jpg +Places365_test_00198646.jpg +Places365_test_00198657.jpg +Places365_test_00198672.jpg +Places365_test_00198677.jpg +Places365_test_00198678.jpg +Places365_test_00198681.jpg +Places365_test_00198684.jpg +Places365_test_00198701.jpg +Places365_test_00198703.jpg +Places365_test_00198705.jpg +Places365_test_00198759.jpg +Places365_test_00198760.jpg +Places365_test_00198787.jpg +Places365_test_00198790.jpg +Places365_test_00198805.jpg +Places365_test_00198809.jpg +Places365_test_00198817.jpg +Places365_test_00198878.jpg +Places365_test_00198880.jpg +Places365_test_00198899.jpg +Places365_test_00198900.jpg +Places365_test_00198913.jpg +Places365_test_00198922.jpg +Places365_test_00198929.jpg +Places365_test_00198985.jpg +Places365_test_00199023.jpg +Places365_test_00199027.jpg +Places365_test_00199032.jpg +Places365_test_00199046.jpg +Places365_test_00199053.jpg +Places365_test_00199055.jpg +Places365_test_00199079.jpg +Places365_test_00199085.jpg +Places365_test_00199092.jpg +Places365_test_00199099.jpg 
+Places365_test_00199100.jpg +Places365_test_00199114.jpg +Places365_test_00199115.jpg +Places365_test_00199123.jpg +Places365_test_00199149.jpg +Places365_test_00199152.jpg +Places365_test_00199161.jpg +Places365_test_00199162.jpg +Places365_test_00199169.jpg +Places365_test_00199181.jpg +Places365_test_00199190.jpg +Places365_test_00199199.jpg +Places365_test_00199217.jpg +Places365_test_00199225.jpg +Places365_test_00199237.jpg +Places365_test_00199239.jpg +Places365_test_00199243.jpg +Places365_test_00199250.jpg +Places365_test_00199253.jpg +Places365_test_00199258.jpg +Places365_test_00199260.jpg +Places365_test_00199288.jpg +Places365_test_00199291.jpg +Places365_test_00199304.jpg +Places365_test_00199332.jpg +Places365_test_00199337.jpg +Places365_test_00199338.jpg +Places365_test_00199344.jpg +Places365_test_00199349.jpg +Places365_test_00199366.jpg +Places365_test_00199397.jpg +Places365_test_00199433.jpg +Places365_test_00199449.jpg +Places365_test_00199450.jpg +Places365_test_00199494.jpg +Places365_test_00199498.jpg +Places365_test_00199504.jpg +Places365_test_00199543.jpg +Places365_test_00199561.jpg +Places365_test_00199570.jpg +Places365_test_00199575.jpg +Places365_test_00199577.jpg +Places365_test_00199580.jpg +Places365_test_00199582.jpg +Places365_test_00199588.jpg +Places365_test_00199590.jpg +Places365_test_00199604.jpg +Places365_test_00199616.jpg +Places365_test_00199629.jpg +Places365_test_00199645.jpg +Places365_test_00199650.jpg +Places365_test_00199652.jpg +Places365_test_00199663.jpg +Places365_test_00199702.jpg +Places365_test_00199720.jpg +Places365_test_00199730.jpg +Places365_test_00199733.jpg +Places365_test_00199746.jpg +Places365_test_00199749.jpg +Places365_test_00199754.jpg +Places365_test_00199772.jpg +Places365_test_00199779.jpg +Places365_test_00199789.jpg +Places365_test_00199802.jpg +Places365_test_00199827.jpg +Places365_test_00199831.jpg +Places365_test_00199834.jpg +Places365_test_00199837.jpg +Places365_test_00199839.jpg +Places365_test_00199879.jpg +Places365_test_00199893.jpg +Places365_test_00199896.jpg +Places365_test_00199897.jpg +Places365_test_00199903.jpg +Places365_test_00199915.jpg +Places365_test_00199921.jpg +Places365_test_00199930.jpg +Places365_test_00199937.jpg +Places365_test_00199955.jpg +Places365_test_00199956.jpg +Places365_test_00199957.jpg +Places365_test_00199989.jpg +Places365_test_00200001.jpg +Places365_test_00200011.jpg +Places365_test_00200013.jpg +Places365_test_00200023.jpg +Places365_test_00200037.jpg +Places365_test_00200044.jpg +Places365_test_00200047.jpg +Places365_test_00200063.jpg +Places365_test_00200069.jpg +Places365_test_00200074.jpg +Places365_test_00200086.jpg +Places365_test_00200092.jpg +Places365_test_00200108.jpg +Places365_test_00200115.jpg +Places365_test_00200119.jpg +Places365_test_00200122.jpg +Places365_test_00200130.jpg +Places365_test_00200176.jpg +Places365_test_00200187.jpg +Places365_test_00200197.jpg +Places365_test_00200208.jpg +Places365_test_00200220.jpg +Places365_test_00200237.jpg +Places365_test_00200239.jpg +Places365_test_00200240.jpg +Places365_test_00200268.jpg +Places365_test_00200270.jpg +Places365_test_00200275.jpg +Places365_test_00200280.jpg +Places365_test_00200285.jpg +Places365_test_00200292.jpg +Places365_test_00200299.jpg +Places365_test_00200342.jpg +Places365_test_00200350.jpg +Places365_test_00200351.jpg +Places365_test_00200355.jpg +Places365_test_00200360.jpg +Places365_test_00200376.jpg +Places365_test_00200378.jpg +Places365_test_00200383.jpg 
+Places365_test_00200392.jpg +Places365_test_00200398.jpg +Places365_test_00200402.jpg +Places365_test_00200414.jpg +Places365_test_00200420.jpg +Places365_test_00200432.jpg +Places365_test_00200440.jpg +Places365_test_00200444.jpg +Places365_test_00200448.jpg +Places365_test_00200456.jpg +Places365_test_00200457.jpg +Places365_test_00200463.jpg +Places365_test_00200465.jpg +Places365_test_00200469.jpg +Places365_test_00200477.jpg +Places365_test_00200481.jpg +Places365_test_00200484.jpg +Places365_test_00200496.jpg +Places365_test_00200517.jpg +Places365_test_00200518.jpg +Places365_test_00200525.jpg +Places365_test_00200549.jpg +Places365_test_00200552.jpg +Places365_test_00200569.jpg +Places365_test_00200578.jpg +Places365_test_00200580.jpg +Places365_test_00200582.jpg +Places365_test_00200588.jpg +Places365_test_00200589.jpg +Places365_test_00200597.jpg +Places365_test_00200604.jpg +Places365_test_00200608.jpg +Places365_test_00200609.jpg +Places365_test_00200610.jpg +Places365_test_00200624.jpg +Places365_test_00200628.jpg +Places365_test_00200673.jpg +Places365_test_00200678.jpg +Places365_test_00200710.jpg +Places365_test_00200726.jpg +Places365_test_00200736.jpg +Places365_test_00200743.jpg +Places365_test_00200756.jpg +Places365_test_00200757.jpg +Places365_test_00200764.jpg +Places365_test_00200770.jpg +Places365_test_00200778.jpg +Places365_test_00200782.jpg +Places365_test_00200790.jpg +Places365_test_00200793.jpg +Places365_test_00200818.jpg +Places365_test_00200837.jpg +Places365_test_00200859.jpg +Places365_test_00200871.jpg +Places365_test_00200892.jpg +Places365_test_00200893.jpg +Places365_test_00200902.jpg +Places365_test_00200967.jpg +Places365_test_00200969.jpg +Places365_test_00200975.jpg +Places365_test_00200985.jpg +Places365_test_00200989.jpg +Places365_test_00200998.jpg +Places365_test_00201004.jpg +Places365_test_00201022.jpg +Places365_test_00201036.jpg +Places365_test_00201048.jpg +Places365_test_00201055.jpg +Places365_test_00201061.jpg +Places365_test_00201071.jpg +Places365_test_00201084.jpg +Places365_test_00201120.jpg +Places365_test_00201142.jpg +Places365_test_00201148.jpg +Places365_test_00201149.jpg +Places365_test_00201179.jpg +Places365_test_00201180.jpg +Places365_test_00201189.jpg +Places365_test_00201201.jpg +Places365_test_00201235.jpg +Places365_test_00201244.jpg +Places365_test_00201249.jpg +Places365_test_00201290.jpg +Places365_test_00201297.jpg +Places365_test_00201303.jpg +Places365_test_00201323.jpg +Places365_test_00201325.jpg +Places365_test_00201327.jpg +Places365_test_00201361.jpg +Places365_test_00201371.jpg +Places365_test_00201375.jpg +Places365_test_00201381.jpg +Places365_test_00201391.jpg +Places365_test_00201393.jpg +Places365_test_00201399.jpg +Places365_test_00201407.jpg +Places365_test_00201421.jpg +Places365_test_00201422.jpg +Places365_test_00201435.jpg +Places365_test_00201447.jpg +Places365_test_00201451.jpg +Places365_test_00201456.jpg +Places365_test_00201460.jpg +Places365_test_00201467.jpg +Places365_test_00201469.jpg +Places365_test_00201476.jpg +Places365_test_00201482.jpg +Places365_test_00201486.jpg +Places365_test_00201494.jpg +Places365_test_00201497.jpg +Places365_test_00201505.jpg +Places365_test_00201513.jpg +Places365_test_00201514.jpg +Places365_test_00201537.jpg +Places365_test_00201540.jpg +Places365_test_00201554.jpg +Places365_test_00201557.jpg +Places365_test_00201582.jpg +Places365_test_00201597.jpg +Places365_test_00201611.jpg +Places365_test_00201633.jpg +Places365_test_00201640.jpg 
+Places365_test_00201659.jpg +Places365_test_00201672.jpg +Places365_test_00201674.jpg +Places365_test_00201680.jpg +Places365_test_00201693.jpg +Places365_test_00201698.jpg +Places365_test_00201726.jpg +Places365_test_00201727.jpg +Places365_test_00201734.jpg +Places365_test_00201738.jpg +Places365_test_00201760.jpg +Places365_test_00201776.jpg +Places365_test_00201820.jpg +Places365_test_00201825.jpg +Places365_test_00201828.jpg +Places365_test_00201837.jpg +Places365_test_00201852.jpg +Places365_test_00201860.jpg +Places365_test_00201872.jpg +Places365_test_00201876.jpg +Places365_test_00201879.jpg +Places365_test_00201894.jpg +Places365_test_00201915.jpg +Places365_test_00201922.jpg +Places365_test_00201933.jpg +Places365_test_00201957.jpg +Places365_test_00201964.jpg +Places365_test_00201966.jpg +Places365_test_00201970.jpg +Places365_test_00201979.jpg +Places365_test_00201982.jpg +Places365_test_00201989.jpg +Places365_test_00201997.jpg +Places365_test_00201999.jpg +Places365_test_00202016.jpg +Places365_test_00202025.jpg +Places365_test_00202053.jpg +Places365_test_00202059.jpg +Places365_test_00202077.jpg +Places365_test_00202111.jpg +Places365_test_00202125.jpg +Places365_test_00202134.jpg +Places365_test_00202139.jpg +Places365_test_00202149.jpg +Places365_test_00202160.jpg +Places365_test_00202168.jpg +Places365_test_00202175.jpg +Places365_test_00202180.jpg +Places365_test_00202186.jpg +Places365_test_00202201.jpg +Places365_test_00202204.jpg +Places365_test_00202214.jpg +Places365_test_00202251.jpg +Places365_test_00202262.jpg +Places365_test_00202269.jpg +Places365_test_00202279.jpg +Places365_test_00202280.jpg +Places365_test_00202294.jpg +Places365_test_00202297.jpg +Places365_test_00202302.jpg +Places365_test_00202303.jpg +Places365_test_00202312.jpg +Places365_test_00202316.jpg +Places365_test_00202322.jpg +Places365_test_00202333.jpg +Places365_test_00202345.jpg +Places365_test_00202348.jpg +Places365_test_00202352.jpg +Places365_test_00202369.jpg +Places365_test_00202396.jpg +Places365_test_00202404.jpg +Places365_test_00202413.jpg +Places365_test_00202440.jpg +Places365_test_00202468.jpg +Places365_test_00202469.jpg +Places365_test_00202476.jpg +Places365_test_00202479.jpg +Places365_test_00202495.jpg +Places365_test_00202512.jpg +Places365_test_00202516.jpg +Places365_test_00202519.jpg +Places365_test_00202521.jpg +Places365_test_00202528.jpg +Places365_test_00202536.jpg +Places365_test_00202551.jpg +Places365_test_00202558.jpg +Places365_test_00202600.jpg +Places365_test_00202627.jpg +Places365_test_00202629.jpg +Places365_test_00202642.jpg +Places365_test_00202659.jpg +Places365_test_00202676.jpg +Places365_test_00202680.jpg +Places365_test_00202682.jpg +Places365_test_00202684.jpg +Places365_test_00202694.jpg +Places365_test_00202715.jpg +Places365_test_00202738.jpg +Places365_test_00202744.jpg +Places365_test_00202745.jpg +Places365_test_00202765.jpg +Places365_test_00202771.jpg +Places365_test_00202791.jpg +Places365_test_00202806.jpg +Places365_test_00202823.jpg +Places365_test_00202825.jpg +Places365_test_00202832.jpg +Places365_test_00202853.jpg +Places365_test_00202861.jpg +Places365_test_00202915.jpg +Places365_test_00202949.jpg +Places365_test_00202967.jpg +Places365_test_00202973.jpg +Places365_test_00202991.jpg +Places365_test_00202993.jpg +Places365_test_00202998.jpg +Places365_test_00203014.jpg +Places365_test_00203023.jpg +Places365_test_00203025.jpg +Places365_test_00203026.jpg +Places365_test_00203028.jpg +Places365_test_00203055.jpg 
+Places365_test_00203078.jpg +Places365_test_00203086.jpg +Places365_test_00203090.jpg +Places365_test_00203098.jpg +Places365_test_00203101.jpg +Places365_test_00203104.jpg +Places365_test_00203125.jpg +Places365_test_00203126.jpg +Places365_test_00203129.jpg +Places365_test_00203132.jpg +Places365_test_00203144.jpg +Places365_test_00203147.jpg +Places365_test_00203158.jpg +Places365_test_00203177.jpg +Places365_test_00203190.jpg +Places365_test_00203199.jpg +Places365_test_00203215.jpg +Places365_test_00203219.jpg +Places365_test_00203221.jpg +Places365_test_00203235.jpg +Places365_test_00203248.jpg +Places365_test_00203260.jpg +Places365_test_00203275.jpg +Places365_test_00203276.jpg +Places365_test_00203278.jpg +Places365_test_00203301.jpg +Places365_test_00203302.jpg +Places365_test_00203326.jpg +Places365_test_00203328.jpg +Places365_test_00203331.jpg +Places365_test_00203359.jpg +Places365_test_00203376.jpg +Places365_test_00203380.jpg +Places365_test_00203381.jpg +Places365_test_00203383.jpg +Places365_test_00203389.jpg +Places365_test_00203390.jpg +Places365_test_00203407.jpg +Places365_test_00203415.jpg +Places365_test_00203458.jpg +Places365_test_00203497.jpg +Places365_test_00203524.jpg +Places365_test_00203538.jpg +Places365_test_00203542.jpg +Places365_test_00203543.jpg +Places365_test_00203545.jpg +Places365_test_00203555.jpg +Places365_test_00203557.jpg +Places365_test_00203565.jpg +Places365_test_00203578.jpg +Places365_test_00203591.jpg +Places365_test_00203594.jpg +Places365_test_00203605.jpg +Places365_test_00203651.jpg +Places365_test_00203667.jpg +Places365_test_00203668.jpg +Places365_test_00203673.jpg +Places365_test_00203675.jpg +Places365_test_00203682.jpg +Places365_test_00203688.jpg +Places365_test_00203695.jpg +Places365_test_00203719.jpg +Places365_test_00203734.jpg +Places365_test_00203740.jpg +Places365_test_00203747.jpg +Places365_test_00203751.jpg +Places365_test_00203757.jpg +Places365_test_00203780.jpg +Places365_test_00203790.jpg +Places365_test_00203795.jpg +Places365_test_00203801.jpg +Places365_test_00203833.jpg +Places365_test_00203846.jpg +Places365_test_00203865.jpg +Places365_test_00203884.jpg +Places365_test_00203896.jpg +Places365_test_00203901.jpg +Places365_test_00203902.jpg +Places365_test_00203914.jpg +Places365_test_00203941.jpg +Places365_test_00203951.jpg +Places365_test_00203972.jpg +Places365_test_00203979.jpg +Places365_test_00203980.jpg +Places365_test_00203987.jpg +Places365_test_00204010.jpg +Places365_test_00204016.jpg +Places365_test_00204017.jpg +Places365_test_00204021.jpg +Places365_test_00204030.jpg +Places365_test_00204046.jpg +Places365_test_00204068.jpg +Places365_test_00204079.jpg +Places365_test_00204089.jpg +Places365_test_00204095.jpg +Places365_test_00204107.jpg +Places365_test_00204114.jpg +Places365_test_00204115.jpg +Places365_test_00204139.jpg +Places365_test_00204151.jpg +Places365_test_00204153.jpg +Places365_test_00204160.jpg +Places365_test_00204177.jpg +Places365_test_00204185.jpg +Places365_test_00204193.jpg +Places365_test_00204203.jpg +Places365_test_00204211.jpg +Places365_test_00204219.jpg +Places365_test_00204222.jpg +Places365_test_00204230.jpg +Places365_test_00204234.jpg +Places365_test_00204236.jpg +Places365_test_00204237.jpg +Places365_test_00204241.jpg +Places365_test_00204246.jpg +Places365_test_00204257.jpg +Places365_test_00204260.jpg +Places365_test_00204273.jpg +Places365_test_00204278.jpg +Places365_test_00204294.jpg +Places365_test_00204299.jpg +Places365_test_00204303.jpg 
+Places365_test_00204310.jpg +Places365_test_00204322.jpg +Places365_test_00204353.jpg +Places365_test_00204364.jpg +Places365_test_00204372.jpg +Places365_test_00204379.jpg +Places365_test_00204390.jpg +Places365_test_00204398.jpg +Places365_test_00204400.jpg +Places365_test_00204401.jpg +Places365_test_00204408.jpg +Places365_test_00204440.jpg +Places365_test_00204448.jpg +Places365_test_00204467.jpg +Places365_test_00204470.jpg +Places365_test_00204478.jpg +Places365_test_00204489.jpg +Places365_test_00204494.jpg +Places365_test_00204496.jpg +Places365_test_00204497.jpg +Places365_test_00204505.jpg +Places365_test_00204533.jpg +Places365_test_00204539.jpg +Places365_test_00204553.jpg +Places365_test_00204563.jpg +Places365_test_00204574.jpg +Places365_test_00204583.jpg +Places365_test_00204584.jpg +Places365_test_00204601.jpg +Places365_test_00204606.jpg +Places365_test_00204627.jpg +Places365_test_00204634.jpg +Places365_test_00204637.jpg +Places365_test_00204662.jpg +Places365_test_00204672.jpg +Places365_test_00204684.jpg +Places365_test_00204704.jpg +Places365_test_00204711.jpg +Places365_test_00204737.jpg +Places365_test_00204745.jpg +Places365_test_00204759.jpg +Places365_test_00204760.jpg +Places365_test_00204763.jpg +Places365_test_00204771.jpg +Places365_test_00204782.jpg +Places365_test_00204787.jpg +Places365_test_00204796.jpg +Places365_test_00204799.jpg +Places365_test_00204809.jpg +Places365_test_00204812.jpg +Places365_test_00204826.jpg +Places365_test_00204831.jpg +Places365_test_00204843.jpg +Places365_test_00204857.jpg +Places365_test_00204885.jpg +Places365_test_00204945.jpg +Places365_test_00204953.jpg +Places365_test_00204954.jpg +Places365_test_00204962.jpg +Places365_test_00204976.jpg +Places365_test_00204978.jpg +Places365_test_00204991.jpg +Places365_test_00204997.jpg +Places365_test_00205000.jpg +Places365_test_00205002.jpg +Places365_test_00205006.jpg +Places365_test_00205009.jpg +Places365_test_00205013.jpg +Places365_test_00205030.jpg +Places365_test_00205038.jpg +Places365_test_00205042.jpg +Places365_test_00205043.jpg +Places365_test_00205066.jpg +Places365_test_00205082.jpg +Places365_test_00205084.jpg +Places365_test_00205089.jpg +Places365_test_00205091.jpg +Places365_test_00205106.jpg +Places365_test_00205133.jpg +Places365_test_00205155.jpg +Places365_test_00205170.jpg +Places365_test_00205175.jpg +Places365_test_00205185.jpg +Places365_test_00205204.jpg +Places365_test_00205206.jpg +Places365_test_00205241.jpg +Places365_test_00205243.jpg +Places365_test_00205245.jpg +Places365_test_00205254.jpg +Places365_test_00205263.jpg +Places365_test_00205269.jpg +Places365_test_00205276.jpg +Places365_test_00205293.jpg +Places365_test_00205304.jpg +Places365_test_00205335.jpg +Places365_test_00205343.jpg +Places365_test_00205355.jpg +Places365_test_00205358.jpg +Places365_test_00205360.jpg +Places365_test_00205364.jpg +Places365_test_00205369.jpg +Places365_test_00205382.jpg +Places365_test_00205387.jpg +Places365_test_00205458.jpg +Places365_test_00205495.jpg +Places365_test_00205527.jpg +Places365_test_00205538.jpg +Places365_test_00205545.jpg +Places365_test_00205557.jpg +Places365_test_00205561.jpg +Places365_test_00205573.jpg +Places365_test_00205607.jpg +Places365_test_00205633.jpg +Places365_test_00205641.jpg +Places365_test_00205644.jpg +Places365_test_00205651.jpg +Places365_test_00205659.jpg +Places365_test_00205701.jpg +Places365_test_00205705.jpg +Places365_test_00205707.jpg +Places365_test_00205709.jpg +Places365_test_00205724.jpg 
+Places365_test_00205742.jpg +Places365_test_00205745.jpg +Places365_test_00205750.jpg +Places365_test_00205758.jpg +Places365_test_00205759.jpg +Places365_test_00205770.jpg +Places365_test_00205788.jpg +Places365_test_00205792.jpg +Places365_test_00205801.jpg +Places365_test_00205804.jpg +Places365_test_00205821.jpg +Places365_test_00205836.jpg +Places365_test_00205846.jpg +Places365_test_00205848.jpg +Places365_test_00205850.jpg +Places365_test_00205854.jpg +Places365_test_00205859.jpg +Places365_test_00205868.jpg +Places365_test_00205870.jpg +Places365_test_00205873.jpg +Places365_test_00205874.jpg +Places365_test_00205876.jpg +Places365_test_00205882.jpg +Places365_test_00205895.jpg +Places365_test_00205899.jpg +Places365_test_00205901.jpg +Places365_test_00205915.jpg +Places365_test_00205932.jpg +Places365_test_00205937.jpg +Places365_test_00205965.jpg +Places365_test_00205970.jpg +Places365_test_00205975.jpg +Places365_test_00205978.jpg +Places365_test_00205980.jpg +Places365_test_00205998.jpg +Places365_test_00206004.jpg +Places365_test_00206010.jpg +Places365_test_00206031.jpg +Places365_test_00206049.jpg +Places365_test_00206077.jpg +Places365_test_00206083.jpg +Places365_test_00206086.jpg +Places365_test_00206087.jpg +Places365_test_00206090.jpg +Places365_test_00206093.jpg +Places365_test_00206100.jpg +Places365_test_00206109.jpg +Places365_test_00206111.jpg +Places365_test_00206114.jpg +Places365_test_00206123.jpg +Places365_test_00206128.jpg +Places365_test_00206146.jpg +Places365_test_00206154.jpg +Places365_test_00206160.jpg +Places365_test_00206167.jpg +Places365_test_00206170.jpg +Places365_test_00206182.jpg +Places365_test_00206189.jpg +Places365_test_00206190.jpg +Places365_test_00206206.jpg +Places365_test_00206222.jpg +Places365_test_00206226.jpg +Places365_test_00206232.jpg +Places365_test_00206252.jpg +Places365_test_00206263.jpg +Places365_test_00206277.jpg +Places365_test_00206279.jpg +Places365_test_00206292.jpg +Places365_test_00206298.jpg +Places365_test_00206300.jpg +Places365_test_00206312.jpg +Places365_test_00206318.jpg +Places365_test_00206332.jpg +Places365_test_00206342.jpg +Places365_test_00206345.jpg +Places365_test_00206346.jpg +Places365_test_00206375.jpg +Places365_test_00206394.jpg +Places365_test_00206421.jpg +Places365_test_00206427.jpg +Places365_test_00206441.jpg +Places365_test_00206447.jpg +Places365_test_00206448.jpg +Places365_test_00206451.jpg +Places365_test_00206455.jpg +Places365_test_00206458.jpg +Places365_test_00206463.jpg +Places365_test_00206469.jpg +Places365_test_00206484.jpg +Places365_test_00206490.jpg +Places365_test_00206497.jpg +Places365_test_00206505.jpg +Places365_test_00206509.jpg +Places365_test_00206524.jpg +Places365_test_00206545.jpg +Places365_test_00206548.jpg +Places365_test_00206566.jpg +Places365_test_00206576.jpg +Places365_test_00206582.jpg +Places365_test_00206608.jpg +Places365_test_00206610.jpg +Places365_test_00206630.jpg +Places365_test_00206668.jpg +Places365_test_00206685.jpg +Places365_test_00206687.jpg +Places365_test_00206689.jpg +Places365_test_00206695.jpg +Places365_test_00206708.jpg +Places365_test_00206725.jpg +Places365_test_00206767.jpg +Places365_test_00206776.jpg +Places365_test_00206799.jpg +Places365_test_00206801.jpg +Places365_test_00206814.jpg +Places365_test_00206820.jpg +Places365_test_00206824.jpg +Places365_test_00206832.jpg +Places365_test_00206835.jpg +Places365_test_00206840.jpg +Places365_test_00206873.jpg +Places365_test_00206881.jpg +Places365_test_00206896.jpg 
+Places365_test_00206907.jpg +Places365_test_00206908.jpg +Places365_test_00206912.jpg +Places365_test_00206920.jpg +Places365_test_00206927.jpg +Places365_test_00206942.jpg +Places365_test_00206944.jpg +Places365_test_00206956.jpg +Places365_test_00206958.jpg +Places365_test_00206961.jpg +Places365_test_00206973.jpg +Places365_test_00206982.jpg +Places365_test_00207013.jpg +Places365_test_00207027.jpg +Places365_test_00207041.jpg +Places365_test_00207043.jpg +Places365_test_00207044.jpg +Places365_test_00207052.jpg +Places365_test_00207062.jpg +Places365_test_00207084.jpg +Places365_test_00207092.jpg +Places365_test_00207101.jpg +Places365_test_00207118.jpg +Places365_test_00207141.jpg +Places365_test_00207143.jpg +Places365_test_00207145.jpg +Places365_test_00207147.jpg +Places365_test_00207153.jpg +Places365_test_00207167.jpg +Places365_test_00207193.jpg +Places365_test_00207194.jpg +Places365_test_00207212.jpg +Places365_test_00207213.jpg +Places365_test_00207216.jpg +Places365_test_00207220.jpg +Places365_test_00207224.jpg +Places365_test_00207225.jpg +Places365_test_00207250.jpg +Places365_test_00207257.jpg +Places365_test_00207259.jpg +Places365_test_00207266.jpg +Places365_test_00207284.jpg +Places365_test_00207285.jpg +Places365_test_00207295.jpg +Places365_test_00207321.jpg +Places365_test_00207349.jpg +Places365_test_00207355.jpg +Places365_test_00207389.jpg +Places365_test_00207405.jpg +Places365_test_00207425.jpg +Places365_test_00207439.jpg +Places365_test_00207445.jpg +Places365_test_00207446.jpg +Places365_test_00207464.jpg +Places365_test_00207502.jpg +Places365_test_00207503.jpg +Places365_test_00207533.jpg +Places365_test_00207557.jpg +Places365_test_00207577.jpg +Places365_test_00207581.jpg +Places365_test_00207600.jpg +Places365_test_00207601.jpg +Places365_test_00207608.jpg +Places365_test_00207613.jpg +Places365_test_00207621.jpg +Places365_test_00207630.jpg +Places365_test_00207639.jpg +Places365_test_00207644.jpg +Places365_test_00207663.jpg +Places365_test_00207674.jpg +Places365_test_00207680.jpg +Places365_test_00207683.jpg +Places365_test_00207685.jpg +Places365_test_00207690.jpg +Places365_test_00207707.jpg +Places365_test_00207712.jpg +Places365_test_00207719.jpg +Places365_test_00207724.jpg +Places365_test_00207725.jpg +Places365_test_00207729.jpg +Places365_test_00207734.jpg +Places365_test_00207740.jpg +Places365_test_00207747.jpg +Places365_test_00207757.jpg +Places365_test_00207761.jpg +Places365_test_00207768.jpg +Places365_test_00207770.jpg +Places365_test_00207775.jpg +Places365_test_00207787.jpg +Places365_test_00207788.jpg +Places365_test_00207801.jpg +Places365_test_00207802.jpg +Places365_test_00207841.jpg +Places365_test_00207852.jpg +Places365_test_00207857.jpg +Places365_test_00207871.jpg +Places365_test_00207873.jpg +Places365_test_00207892.jpg +Places365_test_00207896.jpg +Places365_test_00207897.jpg +Places365_test_00207905.jpg +Places365_test_00207906.jpg +Places365_test_00207940.jpg +Places365_test_00207957.jpg +Places365_test_00207960.jpg +Places365_test_00207961.jpg +Places365_test_00207967.jpg +Places365_test_00207986.jpg +Places365_test_00207996.jpg +Places365_test_00208011.jpg +Places365_test_00208013.jpg +Places365_test_00208038.jpg +Places365_test_00208059.jpg +Places365_test_00208085.jpg +Places365_test_00208094.jpg +Places365_test_00208104.jpg +Places365_test_00208107.jpg +Places365_test_00208126.jpg +Places365_test_00208130.jpg +Places365_test_00208131.jpg +Places365_test_00208140.jpg +Places365_test_00208142.jpg 
+Places365_test_00208147.jpg +Places365_test_00208153.jpg +Places365_test_00208172.jpg +Places365_test_00208182.jpg +Places365_test_00208193.jpg +Places365_test_00208208.jpg +Places365_test_00208227.jpg +Places365_test_00208228.jpg +Places365_test_00208239.jpg +Places365_test_00208245.jpg +Places365_test_00208286.jpg +Places365_test_00208306.jpg +Places365_test_00208316.jpg +Places365_test_00208324.jpg +Places365_test_00208344.jpg +Places365_test_00208358.jpg +Places365_test_00208390.jpg +Places365_test_00208395.jpg +Places365_test_00208396.jpg +Places365_test_00208398.jpg +Places365_test_00208421.jpg +Places365_test_00208424.jpg +Places365_test_00208434.jpg +Places365_test_00208441.jpg +Places365_test_00208447.jpg +Places365_test_00208464.jpg +Places365_test_00208476.jpg +Places365_test_00208500.jpg +Places365_test_00208516.jpg +Places365_test_00208524.jpg +Places365_test_00208529.jpg +Places365_test_00208535.jpg +Places365_test_00208537.jpg +Places365_test_00208544.jpg +Places365_test_00208550.jpg +Places365_test_00208557.jpg +Places365_test_00208562.jpg +Places365_test_00208563.jpg +Places365_test_00208590.jpg +Places365_test_00208593.jpg +Places365_test_00208597.jpg +Places365_test_00208613.jpg +Places365_test_00208624.jpg +Places365_test_00208630.jpg +Places365_test_00208632.jpg +Places365_test_00208644.jpg +Places365_test_00208660.jpg +Places365_test_00208683.jpg +Places365_test_00208719.jpg +Places365_test_00208724.jpg +Places365_test_00208756.jpg +Places365_test_00208767.jpg +Places365_test_00208803.jpg +Places365_test_00208812.jpg +Places365_test_00208845.jpg +Places365_test_00208851.jpg +Places365_test_00208857.jpg +Places365_test_00208858.jpg +Places365_test_00208890.jpg +Places365_test_00208902.jpg +Places365_test_00208903.jpg +Places365_test_00208915.jpg +Places365_test_00208917.jpg +Places365_test_00208925.jpg +Places365_test_00208926.jpg +Places365_test_00208930.jpg +Places365_test_00208936.jpg +Places365_test_00208940.jpg +Places365_test_00208950.jpg +Places365_test_00208958.jpg +Places365_test_00208959.jpg +Places365_test_00208996.jpg +Places365_test_00209018.jpg +Places365_test_00209024.jpg +Places365_test_00209034.jpg +Places365_test_00209039.jpg +Places365_test_00209058.jpg +Places365_test_00209107.jpg +Places365_test_00209121.jpg +Places365_test_00209153.jpg +Places365_test_00209205.jpg +Places365_test_00209217.jpg +Places365_test_00209242.jpg +Places365_test_00209261.jpg +Places365_test_00209289.jpg +Places365_test_00209295.jpg +Places365_test_00209298.jpg +Places365_test_00209299.jpg +Places365_test_00209310.jpg +Places365_test_00209311.jpg +Places365_test_00209317.jpg +Places365_test_00209325.jpg +Places365_test_00209352.jpg +Places365_test_00209379.jpg +Places365_test_00209384.jpg +Places365_test_00209405.jpg +Places365_test_00209417.jpg +Places365_test_00209433.jpg +Places365_test_00209450.jpg +Places365_test_00209453.jpg +Places365_test_00209457.jpg +Places365_test_00209462.jpg +Places365_test_00209487.jpg +Places365_test_00209494.jpg +Places365_test_00209513.jpg +Places365_test_00209526.jpg +Places365_test_00209538.jpg +Places365_test_00209551.jpg +Places365_test_00209555.jpg +Places365_test_00209580.jpg +Places365_test_00209584.jpg +Places365_test_00209610.jpg +Places365_test_00209614.jpg +Places365_test_00209634.jpg +Places365_test_00209641.jpg +Places365_test_00209643.jpg +Places365_test_00209654.jpg +Places365_test_00209657.jpg +Places365_test_00209688.jpg +Places365_test_00209692.jpg +Places365_test_00209720.jpg +Places365_test_00209726.jpg 
+Places365_test_00209735.jpg +Places365_test_00209738.jpg +Places365_test_00209767.jpg +Places365_test_00209769.jpg +Places365_test_00209775.jpg +Places365_test_00209785.jpg +Places365_test_00209808.jpg +Places365_test_00209827.jpg +Places365_test_00209830.jpg +Places365_test_00209841.jpg +Places365_test_00209844.jpg +Places365_test_00209858.jpg +Places365_test_00209868.jpg +Places365_test_00209877.jpg +Places365_test_00209887.jpg +Places365_test_00209890.jpg +Places365_test_00209895.jpg +Places365_test_00209925.jpg +Places365_test_00209935.jpg +Places365_test_00209938.jpg +Places365_test_00209939.jpg +Places365_test_00209951.jpg +Places365_test_00209952.jpg +Places365_test_00209980.jpg +Places365_test_00209997.jpg +Places365_test_00210004.jpg +Places365_test_00210008.jpg +Places365_test_00210019.jpg +Places365_test_00210020.jpg +Places365_test_00210053.jpg +Places365_test_00210054.jpg +Places365_test_00210086.jpg +Places365_test_00210108.jpg +Places365_test_00210110.jpg +Places365_test_00210141.jpg +Places365_test_00210169.jpg +Places365_test_00210236.jpg +Places365_test_00210245.jpg +Places365_test_00210258.jpg +Places365_test_00210278.jpg +Places365_test_00210302.jpg +Places365_test_00210327.jpg +Places365_test_00210334.jpg +Places365_test_00210335.jpg +Places365_test_00210346.jpg +Places365_test_00210357.jpg +Places365_test_00210393.jpg +Places365_test_00210412.jpg +Places365_test_00210415.jpg +Places365_test_00210420.jpg +Places365_test_00210422.jpg +Places365_test_00210439.jpg +Places365_test_00210462.jpg +Places365_test_00210470.jpg +Places365_test_00210483.jpg +Places365_test_00210503.jpg +Places365_test_00210508.jpg +Places365_test_00210514.jpg +Places365_test_00210515.jpg +Places365_test_00210519.jpg +Places365_test_00210520.jpg +Places365_test_00210533.jpg +Places365_test_00210570.jpg +Places365_test_00210586.jpg +Places365_test_00210606.jpg +Places365_test_00210613.jpg +Places365_test_00210637.jpg +Places365_test_00210648.jpg +Places365_test_00210658.jpg +Places365_test_00210661.jpg +Places365_test_00210666.jpg +Places365_test_00210682.jpg +Places365_test_00210683.jpg +Places365_test_00210698.jpg +Places365_test_00210712.jpg +Places365_test_00210733.jpg +Places365_test_00210744.jpg +Places365_test_00210766.jpg +Places365_test_00210767.jpg +Places365_test_00210773.jpg +Places365_test_00210787.jpg +Places365_test_00210813.jpg +Places365_test_00210842.jpg +Places365_test_00210865.jpg +Places365_test_00210880.jpg +Places365_test_00210896.jpg +Places365_test_00210905.jpg +Places365_test_00210912.jpg +Places365_test_00210922.jpg +Places365_test_00210923.jpg +Places365_test_00210924.jpg +Places365_test_00210932.jpg +Places365_test_00210947.jpg +Places365_test_00210950.jpg +Places365_test_00210961.jpg +Places365_test_00210975.jpg +Places365_test_00211003.jpg +Places365_test_00211007.jpg +Places365_test_00211009.jpg +Places365_test_00211013.jpg +Places365_test_00211038.jpg +Places365_test_00211039.jpg +Places365_test_00211045.jpg +Places365_test_00211050.jpg +Places365_test_00211053.jpg +Places365_test_00211056.jpg +Places365_test_00211067.jpg +Places365_test_00211068.jpg +Places365_test_00211071.jpg +Places365_test_00211077.jpg +Places365_test_00211098.jpg +Places365_test_00211103.jpg +Places365_test_00211113.jpg +Places365_test_00211115.jpg +Places365_test_00211116.jpg +Places365_test_00211118.jpg +Places365_test_00211120.jpg +Places365_test_00211148.jpg +Places365_test_00211171.jpg +Places365_test_00211182.jpg +Places365_test_00211243.jpg +Places365_test_00211244.jpg 
+Places365_test_00211254.jpg +Places365_test_00211267.jpg +Places365_test_00211271.jpg +Places365_test_00211289.jpg +Places365_test_00211306.jpg +Places365_test_00211312.jpg +Places365_test_00211317.jpg +Places365_test_00211318.jpg +Places365_test_00211348.jpg +Places365_test_00211368.jpg +Places365_test_00211383.jpg +Places365_test_00211392.jpg +Places365_test_00211414.jpg +Places365_test_00211418.jpg +Places365_test_00211422.jpg +Places365_test_00211427.jpg +Places365_test_00211433.jpg +Places365_test_00211447.jpg +Places365_test_00211456.jpg +Places365_test_00211464.jpg +Places365_test_00211477.jpg +Places365_test_00211485.jpg +Places365_test_00211488.jpg +Places365_test_00211498.jpg +Places365_test_00211499.jpg +Places365_test_00211504.jpg +Places365_test_00211544.jpg +Places365_test_00211554.jpg +Places365_test_00211569.jpg +Places365_test_00211571.jpg +Places365_test_00211574.jpg +Places365_test_00211575.jpg +Places365_test_00211576.jpg +Places365_test_00211579.jpg +Places365_test_00211587.jpg +Places365_test_00211606.jpg +Places365_test_00211615.jpg +Places365_test_00211621.jpg +Places365_test_00211632.jpg +Places365_test_00211636.jpg +Places365_test_00211643.jpg +Places365_test_00211652.jpg +Places365_test_00211653.jpg +Places365_test_00211655.jpg +Places365_test_00211679.jpg +Places365_test_00211689.jpg +Places365_test_00211691.jpg +Places365_test_00211693.jpg +Places365_test_00211706.jpg +Places365_test_00211709.jpg +Places365_test_00211757.jpg +Places365_test_00211764.jpg +Places365_test_00211769.jpg +Places365_test_00211791.jpg +Places365_test_00211794.jpg +Places365_test_00211809.jpg +Places365_test_00211812.jpg +Places365_test_00211840.jpg +Places365_test_00211848.jpg +Places365_test_00211856.jpg +Places365_test_00211865.jpg +Places365_test_00211869.jpg +Places365_test_00211877.jpg +Places365_test_00211882.jpg +Places365_test_00211883.jpg +Places365_test_00211892.jpg +Places365_test_00211895.jpg +Places365_test_00211915.jpg +Places365_test_00211918.jpg +Places365_test_00211924.jpg +Places365_test_00211927.jpg +Places365_test_00211931.jpg +Places365_test_00211934.jpg +Places365_test_00211947.jpg +Places365_test_00211969.jpg +Places365_test_00211975.jpg +Places365_test_00211997.jpg +Places365_test_00212004.jpg +Places365_test_00212010.jpg +Places365_test_00212017.jpg +Places365_test_00212024.jpg +Places365_test_00212025.jpg +Places365_test_00212036.jpg +Places365_test_00212043.jpg +Places365_test_00212044.jpg +Places365_test_00212048.jpg +Places365_test_00212083.jpg +Places365_test_00212085.jpg +Places365_test_00212105.jpg +Places365_test_00212120.jpg +Places365_test_00212156.jpg +Places365_test_00212196.jpg +Places365_test_00212205.jpg +Places365_test_00212224.jpg +Places365_test_00212229.jpg +Places365_test_00212230.jpg +Places365_test_00212247.jpg +Places365_test_00212284.jpg +Places365_test_00212304.jpg +Places365_test_00212305.jpg +Places365_test_00212318.jpg +Places365_test_00212328.jpg +Places365_test_00212335.jpg +Places365_test_00212370.jpg +Places365_test_00212376.jpg +Places365_test_00212378.jpg +Places365_test_00212392.jpg +Places365_test_00212430.jpg +Places365_test_00212432.jpg +Places365_test_00212444.jpg +Places365_test_00212452.jpg +Places365_test_00212456.jpg +Places365_test_00212459.jpg +Places365_test_00212470.jpg +Places365_test_00212477.jpg +Places365_test_00212504.jpg +Places365_test_00212523.jpg +Places365_test_00212541.jpg +Places365_test_00212549.jpg +Places365_test_00212562.jpg +Places365_test_00212587.jpg +Places365_test_00212591.jpg 
+Places365_test_00212592.jpg +Places365_test_00212599.jpg +Places365_test_00212631.jpg +Places365_test_00212638.jpg +Places365_test_00212647.jpg +Places365_test_00212665.jpg +Places365_test_00212668.jpg +Places365_test_00212708.jpg +Places365_test_00212716.jpg +Places365_test_00212721.jpg +Places365_test_00212723.jpg +Places365_test_00212748.jpg +Places365_test_00212781.jpg +Places365_test_00212818.jpg +Places365_test_00212821.jpg +Places365_test_00212833.jpg +Places365_test_00212836.jpg +Places365_test_00212844.jpg +Places365_test_00212847.jpg +Places365_test_00212849.jpg +Places365_test_00212852.jpg +Places365_test_00212863.jpg +Places365_test_00212869.jpg +Places365_test_00212876.jpg +Places365_test_00212879.jpg +Places365_test_00212886.jpg +Places365_test_00212893.jpg +Places365_test_00212913.jpg +Places365_test_00212922.jpg +Places365_test_00212927.jpg +Places365_test_00212955.jpg +Places365_test_00212956.jpg +Places365_test_00212959.jpg +Places365_test_00212977.jpg +Places365_test_00212982.jpg +Places365_test_00212998.jpg +Places365_test_00213018.jpg +Places365_test_00213049.jpg +Places365_test_00213052.jpg +Places365_test_00213057.jpg +Places365_test_00213080.jpg +Places365_test_00213085.jpg +Places365_test_00213098.jpg +Places365_test_00213109.jpg +Places365_test_00213115.jpg +Places365_test_00213122.jpg +Places365_test_00213134.jpg +Places365_test_00213150.jpg +Places365_test_00213154.jpg +Places365_test_00213157.jpg +Places365_test_00213165.jpg +Places365_test_00213179.jpg +Places365_test_00213185.jpg +Places365_test_00213186.jpg +Places365_test_00213193.jpg +Places365_test_00213204.jpg +Places365_test_00213215.jpg +Places365_test_00213223.jpg +Places365_test_00213224.jpg +Places365_test_00213229.jpg +Places365_test_00213253.jpg +Places365_test_00213266.jpg +Places365_test_00213269.jpg +Places365_test_00213282.jpg +Places365_test_00213283.jpg +Places365_test_00213305.jpg +Places365_test_00213380.jpg +Places365_test_00213384.jpg +Places365_test_00213393.jpg +Places365_test_00213394.jpg +Places365_test_00213408.jpg +Places365_test_00213409.jpg +Places365_test_00213416.jpg +Places365_test_00213420.jpg +Places365_test_00213425.jpg +Places365_test_00213433.jpg +Places365_test_00213451.jpg +Places365_test_00213478.jpg +Places365_test_00213490.jpg +Places365_test_00213509.jpg +Places365_test_00213517.jpg +Places365_test_00213534.jpg +Places365_test_00213545.jpg +Places365_test_00213558.jpg +Places365_test_00213562.jpg +Places365_test_00213580.jpg +Places365_test_00213591.jpg +Places365_test_00213596.jpg +Places365_test_00213600.jpg +Places365_test_00213613.jpg +Places365_test_00213614.jpg +Places365_test_00213615.jpg +Places365_test_00213626.jpg +Places365_test_00213664.jpg +Places365_test_00213666.jpg +Places365_test_00213678.jpg +Places365_test_00213685.jpg +Places365_test_00213694.jpg +Places365_test_00213715.jpg +Places365_test_00213721.jpg +Places365_test_00213727.jpg +Places365_test_00213741.jpg +Places365_test_00213746.jpg +Places365_test_00213748.jpg +Places365_test_00213752.jpg +Places365_test_00213757.jpg +Places365_test_00213770.jpg +Places365_test_00213792.jpg +Places365_test_00213832.jpg +Places365_test_00213859.jpg +Places365_test_00213863.jpg +Places365_test_00213868.jpg +Places365_test_00213877.jpg +Places365_test_00213883.jpg +Places365_test_00213888.jpg +Places365_test_00213892.jpg +Places365_test_00213899.jpg +Places365_test_00213918.jpg +Places365_test_00213955.jpg +Places365_test_00213968.jpg +Places365_test_00213980.jpg +Places365_test_00213990.jpg 
+Places365_test_00214026.jpg +Places365_test_00214054.jpg +Places365_test_00214058.jpg +Places365_test_00214060.jpg +Places365_test_00214069.jpg +Places365_test_00214072.jpg +Places365_test_00214111.jpg +Places365_test_00214121.jpg +Places365_test_00214123.jpg +Places365_test_00214127.jpg +Places365_test_00214134.jpg +Places365_test_00214153.jpg +Places365_test_00214156.jpg +Places365_test_00214182.jpg +Places365_test_00214192.jpg +Places365_test_00214196.jpg +Places365_test_00214205.jpg +Places365_test_00214206.jpg +Places365_test_00214211.jpg +Places365_test_00214222.jpg +Places365_test_00214224.jpg +Places365_test_00214225.jpg +Places365_test_00214229.jpg +Places365_test_00214236.jpg +Places365_test_00214245.jpg +Places365_test_00214267.jpg +Places365_test_00214282.jpg +Places365_test_00214289.jpg +Places365_test_00214294.jpg +Places365_test_00214319.jpg +Places365_test_00214325.jpg +Places365_test_00214327.jpg +Places365_test_00214328.jpg +Places365_test_00214329.jpg +Places365_test_00214335.jpg +Places365_test_00214345.jpg +Places365_test_00214367.jpg +Places365_test_00214370.jpg +Places365_test_00214371.jpg +Places365_test_00214372.jpg +Places365_test_00214379.jpg +Places365_test_00214399.jpg +Places365_test_00214404.jpg +Places365_test_00214412.jpg +Places365_test_00214426.jpg +Places365_test_00214438.jpg +Places365_test_00214459.jpg +Places365_test_00214468.jpg +Places365_test_00214474.jpg +Places365_test_00214476.jpg +Places365_test_00214485.jpg +Places365_test_00214500.jpg +Places365_test_00214505.jpg +Places365_test_00214507.jpg +Places365_test_00214516.jpg +Places365_test_00214534.jpg +Places365_test_00214539.jpg +Places365_test_00214543.jpg +Places365_test_00214563.jpg +Places365_test_00214564.jpg +Places365_test_00214574.jpg +Places365_test_00214582.jpg +Places365_test_00214595.jpg +Places365_test_00214616.jpg +Places365_test_00214617.jpg +Places365_test_00214618.jpg +Places365_test_00214619.jpg +Places365_test_00214625.jpg +Places365_test_00214640.jpg +Places365_test_00214688.jpg +Places365_test_00214712.jpg +Places365_test_00214720.jpg +Places365_test_00214724.jpg +Places365_test_00214726.jpg +Places365_test_00214747.jpg +Places365_test_00214774.jpg +Places365_test_00214800.jpg +Places365_test_00214801.jpg +Places365_test_00214835.jpg +Places365_test_00214857.jpg +Places365_test_00214869.jpg +Places365_test_00214876.jpg +Places365_test_00214879.jpg +Places365_test_00214889.jpg +Places365_test_00214951.jpg +Places365_test_00214968.jpg +Places365_test_00214972.jpg +Places365_test_00214977.jpg +Places365_test_00214990.jpg +Places365_test_00215000.jpg +Places365_test_00215011.jpg +Places365_test_00215013.jpg +Places365_test_00215014.jpg +Places365_test_00215037.jpg +Places365_test_00215042.jpg +Places365_test_00215050.jpg +Places365_test_00215105.jpg +Places365_test_00215109.jpg +Places365_test_00215126.jpg +Places365_test_00215132.jpg +Places365_test_00215141.jpg +Places365_test_00215142.jpg +Places365_test_00215143.jpg +Places365_test_00215161.jpg +Places365_test_00215183.jpg +Places365_test_00215191.jpg +Places365_test_00215201.jpg +Places365_test_00215221.jpg +Places365_test_00215225.jpg +Places365_test_00215227.jpg +Places365_test_00215235.jpg +Places365_test_00215258.jpg +Places365_test_00215268.jpg +Places365_test_00215269.jpg +Places365_test_00215289.jpg +Places365_test_00215291.jpg +Places365_test_00215305.jpg +Places365_test_00215312.jpg +Places365_test_00215331.jpg +Places365_test_00215339.jpg +Places365_test_00215341.jpg +Places365_test_00215343.jpg 
+Places365_test_00215347.jpg +Places365_test_00215367.jpg +Places365_test_00215371.jpg +Places365_test_00215379.jpg +Places365_test_00215398.jpg +Places365_test_00215400.jpg +Places365_test_00215401.jpg +Places365_test_00215404.jpg +Places365_test_00215416.jpg +Places365_test_00215455.jpg +Places365_test_00215456.jpg +Places365_test_00215469.jpg +Places365_test_00215481.jpg +Places365_test_00215482.jpg +Places365_test_00215501.jpg +Places365_test_00215514.jpg +Places365_test_00215526.jpg +Places365_test_00215528.jpg +Places365_test_00215539.jpg +Places365_test_00215547.jpg +Places365_test_00215559.jpg +Places365_test_00215560.jpg +Places365_test_00215581.jpg +Places365_test_00215586.jpg +Places365_test_00215606.jpg +Places365_test_00215615.jpg +Places365_test_00215617.jpg +Places365_test_00215623.jpg +Places365_test_00215638.jpg +Places365_test_00215659.jpg +Places365_test_00215672.jpg +Places365_test_00215677.jpg +Places365_test_00215701.jpg +Places365_test_00215722.jpg +Places365_test_00215724.jpg +Places365_test_00215741.jpg +Places365_test_00215754.jpg +Places365_test_00215767.jpg +Places365_test_00215772.jpg +Places365_test_00215798.jpg +Places365_test_00215801.jpg +Places365_test_00215822.jpg +Places365_test_00215825.jpg +Places365_test_00215844.jpg +Places365_test_00215851.jpg +Places365_test_00215852.jpg +Places365_test_00215858.jpg +Places365_test_00215860.jpg +Places365_test_00215872.jpg +Places365_test_00215873.jpg +Places365_test_00215878.jpg +Places365_test_00215879.jpg +Places365_test_00215885.jpg +Places365_test_00215895.jpg +Places365_test_00215901.jpg +Places365_test_00215926.jpg +Places365_test_00215928.jpg +Places365_test_00215962.jpg +Places365_test_00215965.jpg +Places365_test_00215969.jpg +Places365_test_00215978.jpg +Places365_test_00215994.jpg +Places365_test_00215997.jpg +Places365_test_00215998.jpg +Places365_test_00215999.jpg +Places365_test_00216012.jpg +Places365_test_00216017.jpg +Places365_test_00216024.jpg +Places365_test_00216031.jpg +Places365_test_00216059.jpg +Places365_test_00216080.jpg +Places365_test_00216100.jpg +Places365_test_00216148.jpg +Places365_test_00216151.jpg +Places365_test_00216153.jpg +Places365_test_00216158.jpg +Places365_test_00216165.jpg +Places365_test_00216167.jpg +Places365_test_00216168.jpg +Places365_test_00216181.jpg +Places365_test_00216193.jpg +Places365_test_00216194.jpg +Places365_test_00216215.jpg +Places365_test_00216250.jpg +Places365_test_00216260.jpg +Places365_test_00216262.jpg +Places365_test_00216265.jpg +Places365_test_00216272.jpg +Places365_test_00216276.jpg +Places365_test_00216279.jpg +Places365_test_00216293.jpg +Places365_test_00216294.jpg +Places365_test_00216302.jpg +Places365_test_00216327.jpg +Places365_test_00216331.jpg +Places365_test_00216338.jpg +Places365_test_00216340.jpg +Places365_test_00216341.jpg +Places365_test_00216344.jpg +Places365_test_00216351.jpg +Places365_test_00216371.jpg +Places365_test_00216377.jpg +Places365_test_00216392.jpg +Places365_test_00216395.jpg +Places365_test_00216399.jpg +Places365_test_00216409.jpg +Places365_test_00216412.jpg +Places365_test_00216426.jpg +Places365_test_00216442.jpg +Places365_test_00216446.jpg +Places365_test_00216484.jpg +Places365_test_00216497.jpg +Places365_test_00216500.jpg +Places365_test_00216523.jpg +Places365_test_00216535.jpg +Places365_test_00216538.jpg +Places365_test_00216546.jpg +Places365_test_00216547.jpg +Places365_test_00216558.jpg +Places365_test_00216567.jpg +Places365_test_00216600.jpg +Places365_test_00216611.jpg 
+Places365_test_00216625.jpg +Places365_test_00216626.jpg +Places365_test_00216637.jpg +Places365_test_00216693.jpg +Places365_test_00216714.jpg +Places365_test_00216727.jpg +Places365_test_00216733.jpg +Places365_test_00216740.jpg +Places365_test_00216744.jpg +Places365_test_00216754.jpg +Places365_test_00216755.jpg +Places365_test_00216757.jpg +Places365_test_00216764.jpg +Places365_test_00216772.jpg +Places365_test_00216784.jpg +Places365_test_00216791.jpg +Places365_test_00216803.jpg +Places365_test_00216807.jpg +Places365_test_00216820.jpg +Places365_test_00216861.jpg +Places365_test_00216863.jpg +Places365_test_00216864.jpg +Places365_test_00216876.jpg +Places365_test_00216897.jpg +Places365_test_00216913.jpg +Places365_test_00216915.jpg +Places365_test_00216919.jpg +Places365_test_00216921.jpg +Places365_test_00216929.jpg +Places365_test_00216945.jpg +Places365_test_00216953.jpg +Places365_test_00216954.jpg +Places365_test_00216969.jpg +Places365_test_00216974.jpg +Places365_test_00216977.jpg +Places365_test_00216978.jpg +Places365_test_00216992.jpg +Places365_test_00216998.jpg +Places365_test_00217032.jpg +Places365_test_00217069.jpg +Places365_test_00217087.jpg +Places365_test_00217092.jpg +Places365_test_00217095.jpg +Places365_test_00217098.jpg +Places365_test_00217166.jpg +Places365_test_00217184.jpg +Places365_test_00217190.jpg +Places365_test_00217191.jpg +Places365_test_00217196.jpg +Places365_test_00217207.jpg +Places365_test_00217208.jpg +Places365_test_00217220.jpg +Places365_test_00217223.jpg +Places365_test_00217259.jpg +Places365_test_00217265.jpg +Places365_test_00217267.jpg +Places365_test_00217275.jpg +Places365_test_00217277.jpg +Places365_test_00217281.jpg +Places365_test_00217309.jpg +Places365_test_00217326.jpg +Places365_test_00217336.jpg +Places365_test_00217360.jpg +Places365_test_00217380.jpg +Places365_test_00217388.jpg +Places365_test_00217391.jpg +Places365_test_00217392.jpg +Places365_test_00217416.jpg +Places365_test_00217422.jpg +Places365_test_00217445.jpg +Places365_test_00217452.jpg +Places365_test_00217457.jpg +Places365_test_00217460.jpg +Places365_test_00217464.jpg +Places365_test_00217465.jpg +Places365_test_00217471.jpg +Places365_test_00217517.jpg +Places365_test_00217533.jpg +Places365_test_00217539.jpg +Places365_test_00217552.jpg +Places365_test_00217558.jpg +Places365_test_00217590.jpg +Places365_test_00217593.jpg +Places365_test_00217611.jpg +Places365_test_00217614.jpg +Places365_test_00217630.jpg +Places365_test_00217631.jpg +Places365_test_00217633.jpg +Places365_test_00217653.jpg +Places365_test_00217658.jpg +Places365_test_00217661.jpg +Places365_test_00217668.jpg +Places365_test_00217681.jpg +Places365_test_00217686.jpg +Places365_test_00217692.jpg +Places365_test_00217700.jpg +Places365_test_00217703.jpg +Places365_test_00217705.jpg +Places365_test_00217720.jpg +Places365_test_00217747.jpg +Places365_test_00217759.jpg +Places365_test_00217760.jpg +Places365_test_00217788.jpg +Places365_test_00217811.jpg +Places365_test_00217819.jpg +Places365_test_00217828.jpg +Places365_test_00217835.jpg +Places365_test_00217842.jpg +Places365_test_00217847.jpg +Places365_test_00217858.jpg +Places365_test_00217867.jpg +Places365_test_00217873.jpg +Places365_test_00217888.jpg +Places365_test_00217909.jpg +Places365_test_00217910.jpg +Places365_test_00217933.jpg +Places365_test_00217988.jpg +Places365_test_00218004.jpg +Places365_test_00218018.jpg +Places365_test_00218029.jpg +Places365_test_00218076.jpg +Places365_test_00218084.jpg 
+Places365_test_00218086.jpg +Places365_test_00218102.jpg +Places365_test_00218105.jpg +Places365_test_00218126.jpg +Places365_test_00218129.jpg +Places365_test_00218130.jpg +Places365_test_00218152.jpg +Places365_test_00218189.jpg +Places365_test_00218201.jpg +Places365_test_00218241.jpg +Places365_test_00218250.jpg +Places365_test_00218261.jpg +Places365_test_00218271.jpg +Places365_test_00218275.jpg +Places365_test_00218287.jpg +Places365_test_00218292.jpg +Places365_test_00218310.jpg +Places365_test_00218351.jpg +Places365_test_00218356.jpg +Places365_test_00218380.jpg +Places365_test_00218387.jpg +Places365_test_00218392.jpg +Places365_test_00218402.jpg +Places365_test_00218412.jpg +Places365_test_00218413.jpg +Places365_test_00218433.jpg +Places365_test_00218436.jpg +Places365_test_00218438.jpg +Places365_test_00218442.jpg +Places365_test_00218447.jpg +Places365_test_00218457.jpg +Places365_test_00218460.jpg +Places365_test_00218461.jpg +Places365_test_00218465.jpg +Places365_test_00218482.jpg +Places365_test_00218500.jpg +Places365_test_00218510.jpg +Places365_test_00218515.jpg +Places365_test_00218526.jpg +Places365_test_00218544.jpg +Places365_test_00218548.jpg +Places365_test_00218560.jpg +Places365_test_00218564.jpg +Places365_test_00218584.jpg +Places365_test_00218596.jpg +Places365_test_00218606.jpg +Places365_test_00218607.jpg +Places365_test_00218610.jpg +Places365_test_00218616.jpg +Places365_test_00218620.jpg +Places365_test_00218625.jpg +Places365_test_00218626.jpg +Places365_test_00218631.jpg +Places365_test_00218632.jpg +Places365_test_00218637.jpg +Places365_test_00218662.jpg +Places365_test_00218672.jpg +Places365_test_00218676.jpg +Places365_test_00218677.jpg +Places365_test_00218683.jpg +Places365_test_00218703.jpg +Places365_test_00218714.jpg +Places365_test_00218733.jpg +Places365_test_00218756.jpg +Places365_test_00218774.jpg +Places365_test_00218779.jpg +Places365_test_00218781.jpg +Places365_test_00218787.jpg +Places365_test_00218799.jpg +Places365_test_00218808.jpg +Places365_test_00218809.jpg +Places365_test_00218810.jpg +Places365_test_00218815.jpg +Places365_test_00218820.jpg +Places365_test_00218825.jpg +Places365_test_00218826.jpg +Places365_test_00218829.jpg +Places365_test_00218830.jpg +Places365_test_00218842.jpg +Places365_test_00218851.jpg +Places365_test_00218855.jpg +Places365_test_00218857.jpg +Places365_test_00218859.jpg +Places365_test_00218887.jpg +Places365_test_00218897.jpg +Places365_test_00218909.jpg +Places365_test_00218943.jpg +Places365_test_00218947.jpg +Places365_test_00218970.jpg +Places365_test_00218972.jpg +Places365_test_00218980.jpg +Places365_test_00218984.jpg +Places365_test_00218997.jpg +Places365_test_00219001.jpg +Places365_test_00219004.jpg +Places365_test_00219014.jpg +Places365_test_00219020.jpg +Places365_test_00219044.jpg +Places365_test_00219073.jpg +Places365_test_00219111.jpg +Places365_test_00219112.jpg +Places365_test_00219115.jpg +Places365_test_00219116.jpg +Places365_test_00219122.jpg +Places365_test_00219139.jpg +Places365_test_00219142.jpg +Places365_test_00219152.jpg +Places365_test_00219163.jpg +Places365_test_00219164.jpg +Places365_test_00219170.jpg +Places365_test_00219185.jpg +Places365_test_00219219.jpg +Places365_test_00219225.jpg +Places365_test_00219231.jpg +Places365_test_00219236.jpg +Places365_test_00219237.jpg +Places365_test_00219243.jpg +Places365_test_00219244.jpg +Places365_test_00219247.jpg +Places365_test_00219266.jpg +Places365_test_00219275.jpg +Places365_test_00219292.jpg 
+Places365_test_00219295.jpg +Places365_test_00219296.jpg +Places365_test_00219359.jpg +Places365_test_00219378.jpg +Places365_test_00219380.jpg +Places365_test_00219396.jpg +Places365_test_00219405.jpg +Places365_test_00219420.jpg +Places365_test_00219426.jpg +Places365_test_00219432.jpg +Places365_test_00219461.jpg +Places365_test_00219489.jpg +Places365_test_00219495.jpg +Places365_test_00219505.jpg +Places365_test_00219511.jpg +Places365_test_00219521.jpg +Places365_test_00219527.jpg +Places365_test_00219539.jpg +Places365_test_00219551.jpg +Places365_test_00219561.jpg +Places365_test_00219574.jpg +Places365_test_00219615.jpg +Places365_test_00219638.jpg +Places365_test_00219678.jpg +Places365_test_00219679.jpg +Places365_test_00219680.jpg +Places365_test_00219688.jpg +Places365_test_00219694.jpg +Places365_test_00219699.jpg +Places365_test_00219701.jpg +Places365_test_00219703.jpg +Places365_test_00219704.jpg +Places365_test_00219709.jpg +Places365_test_00219726.jpg +Places365_test_00219740.jpg +Places365_test_00219752.jpg +Places365_test_00219756.jpg +Places365_test_00219762.jpg +Places365_test_00219774.jpg +Places365_test_00219776.jpg +Places365_test_00219779.jpg +Places365_test_00219796.jpg +Places365_test_00219807.jpg +Places365_test_00219809.jpg +Places365_test_00219825.jpg +Places365_test_00219837.jpg +Places365_test_00219850.jpg +Places365_test_00219861.jpg +Places365_test_00219866.jpg +Places365_test_00219889.jpg +Places365_test_00219890.jpg +Places365_test_00219891.jpg +Places365_test_00219895.jpg +Places365_test_00219901.jpg +Places365_test_00219904.jpg +Places365_test_00219937.jpg +Places365_test_00219962.jpg +Places365_test_00219964.jpg +Places365_test_00219965.jpg +Places365_test_00219969.jpg +Places365_test_00219971.jpg +Places365_test_00219986.jpg +Places365_test_00219990.jpg +Places365_test_00220004.jpg +Places365_test_00220006.jpg +Places365_test_00220010.jpg +Places365_test_00220011.jpg +Places365_test_00220025.jpg +Places365_test_00220030.jpg +Places365_test_00220042.jpg +Places365_test_00220046.jpg +Places365_test_00220049.jpg +Places365_test_00220053.jpg +Places365_test_00220058.jpg +Places365_test_00220108.jpg +Places365_test_00220111.jpg +Places365_test_00220116.jpg +Places365_test_00220126.jpg +Places365_test_00220142.jpg +Places365_test_00220146.jpg +Places365_test_00220152.jpg +Places365_test_00220160.jpg +Places365_test_00220184.jpg +Places365_test_00220194.jpg +Places365_test_00220200.jpg +Places365_test_00220211.jpg +Places365_test_00220234.jpg +Places365_test_00220245.jpg +Places365_test_00220247.jpg +Places365_test_00220279.jpg +Places365_test_00220295.jpg +Places365_test_00220299.jpg +Places365_test_00220303.jpg +Places365_test_00220313.jpg +Places365_test_00220315.jpg +Places365_test_00220317.jpg +Places365_test_00220318.jpg +Places365_test_00220319.jpg +Places365_test_00220336.jpg +Places365_test_00220339.jpg +Places365_test_00220347.jpg +Places365_test_00220358.jpg +Places365_test_00220378.jpg +Places365_test_00220380.jpg +Places365_test_00220382.jpg +Places365_test_00220384.jpg +Places365_test_00220406.jpg +Places365_test_00220409.jpg +Places365_test_00220411.jpg +Places365_test_00220418.jpg +Places365_test_00220424.jpg +Places365_test_00220438.jpg +Places365_test_00220452.jpg +Places365_test_00220484.jpg +Places365_test_00220486.jpg +Places365_test_00220490.jpg +Places365_test_00220500.jpg +Places365_test_00220502.jpg +Places365_test_00220508.jpg +Places365_test_00220512.jpg +Places365_test_00220516.jpg +Places365_test_00220527.jpg 
+Places365_test_00220569.jpg +Places365_test_00220573.jpg +Places365_test_00220574.jpg +Places365_test_00220591.jpg +Places365_test_00220594.jpg +Places365_test_00220595.jpg +Places365_test_00220604.jpg +Places365_test_00220625.jpg +Places365_test_00220643.jpg +Places365_test_00220644.jpg +Places365_test_00220658.jpg +Places365_test_00220683.jpg +Places365_test_00220687.jpg +Places365_test_00220697.jpg +Places365_test_00220699.jpg +Places365_test_00220733.jpg +Places365_test_00220741.jpg +Places365_test_00220745.jpg +Places365_test_00220773.jpg +Places365_test_00220781.jpg +Places365_test_00220788.jpg +Places365_test_00220791.jpg +Places365_test_00220795.jpg +Places365_test_00220796.jpg +Places365_test_00220800.jpg +Places365_test_00220801.jpg +Places365_test_00220812.jpg +Places365_test_00220821.jpg +Places365_test_00220824.jpg +Places365_test_00220825.jpg +Places365_test_00220827.jpg +Places365_test_00220846.jpg +Places365_test_00220855.jpg +Places365_test_00220857.jpg +Places365_test_00220864.jpg +Places365_test_00220879.jpg +Places365_test_00220890.jpg +Places365_test_00220907.jpg +Places365_test_00220921.jpg +Places365_test_00220930.jpg +Places365_test_00220949.jpg +Places365_test_00220970.jpg +Places365_test_00220977.jpg +Places365_test_00220992.jpg +Places365_test_00221015.jpg +Places365_test_00221016.jpg +Places365_test_00221019.jpg +Places365_test_00221021.jpg +Places365_test_00221030.jpg +Places365_test_00221046.jpg +Places365_test_00221065.jpg +Places365_test_00221091.jpg +Places365_test_00221099.jpg +Places365_test_00221107.jpg +Places365_test_00221117.jpg +Places365_test_00221126.jpg +Places365_test_00221131.jpg +Places365_test_00221148.jpg +Places365_test_00221151.jpg +Places365_test_00221163.jpg +Places365_test_00221176.jpg +Places365_test_00221181.jpg +Places365_test_00221204.jpg +Places365_test_00221214.jpg +Places365_test_00221230.jpg +Places365_test_00221236.jpg +Places365_test_00221239.jpg +Places365_test_00221252.jpg +Places365_test_00221259.jpg +Places365_test_00221265.jpg +Places365_test_00221273.jpg +Places365_test_00221278.jpg +Places365_test_00221279.jpg +Places365_test_00221296.jpg +Places365_test_00221310.jpg +Places365_test_00221324.jpg +Places365_test_00221347.jpg +Places365_test_00221348.jpg +Places365_test_00221353.jpg +Places365_test_00221364.jpg +Places365_test_00221367.jpg +Places365_test_00221370.jpg +Places365_test_00221397.jpg +Places365_test_00221404.jpg +Places365_test_00221411.jpg +Places365_test_00221412.jpg +Places365_test_00221435.jpg +Places365_test_00221460.jpg +Places365_test_00221468.jpg +Places365_test_00221470.jpg +Places365_test_00221484.jpg +Places365_test_00221509.jpg +Places365_test_00221510.jpg +Places365_test_00221520.jpg +Places365_test_00221532.jpg +Places365_test_00221544.jpg +Places365_test_00221551.jpg +Places365_test_00221552.jpg +Places365_test_00221568.jpg +Places365_test_00221571.jpg +Places365_test_00221575.jpg +Places365_test_00221586.jpg +Places365_test_00221594.jpg +Places365_test_00221610.jpg +Places365_test_00221620.jpg +Places365_test_00221627.jpg +Places365_test_00221648.jpg +Places365_test_00221652.jpg +Places365_test_00221663.jpg +Places365_test_00221725.jpg +Places365_test_00221738.jpg +Places365_test_00221757.jpg +Places365_test_00221780.jpg +Places365_test_00221785.jpg +Places365_test_00221787.jpg +Places365_test_00221805.jpg +Places365_test_00221812.jpg +Places365_test_00221839.jpg +Places365_test_00221842.jpg +Places365_test_00221860.jpg +Places365_test_00221868.jpg +Places365_test_00221875.jpg 
+Places365_test_00221876.jpg +Places365_test_00221894.jpg +Places365_test_00221897.jpg +Places365_test_00221905.jpg +Places365_test_00221912.jpg +Places365_test_00221938.jpg +Places365_test_00221957.jpg +Places365_test_00221960.jpg +Places365_test_00221968.jpg +Places365_test_00221971.jpg +Places365_test_00221982.jpg +Places365_test_00221994.jpg +Places365_test_00221995.jpg +Places365_test_00222008.jpg +Places365_test_00222016.jpg +Places365_test_00222018.jpg +Places365_test_00222036.jpg +Places365_test_00222049.jpg +Places365_test_00222060.jpg +Places365_test_00222061.jpg +Places365_test_00222083.jpg +Places365_test_00222094.jpg +Places365_test_00222099.jpg +Places365_test_00222108.jpg +Places365_test_00222110.jpg +Places365_test_00222120.jpg +Places365_test_00222136.jpg +Places365_test_00222140.jpg +Places365_test_00222151.jpg +Places365_test_00222155.jpg +Places365_test_00222183.jpg +Places365_test_00222194.jpg +Places365_test_00222197.jpg +Places365_test_00222206.jpg +Places365_test_00222209.jpg +Places365_test_00222212.jpg +Places365_test_00222214.jpg +Places365_test_00222239.jpg +Places365_test_00222261.jpg +Places365_test_00222265.jpg +Places365_test_00222267.jpg +Places365_test_00222269.jpg +Places365_test_00222271.jpg +Places365_test_00222275.jpg +Places365_test_00222284.jpg +Places365_test_00222298.jpg +Places365_test_00222312.jpg +Places365_test_00222330.jpg +Places365_test_00222351.jpg +Places365_test_00222376.jpg +Places365_test_00222415.jpg +Places365_test_00222417.jpg +Places365_test_00222419.jpg +Places365_test_00222428.jpg +Places365_test_00222439.jpg +Places365_test_00222444.jpg +Places365_test_00222448.jpg +Places365_test_00222463.jpg +Places365_test_00222470.jpg +Places365_test_00222472.jpg +Places365_test_00222475.jpg +Places365_test_00222479.jpg +Places365_test_00222485.jpg +Places365_test_00222499.jpg +Places365_test_00222500.jpg +Places365_test_00222510.jpg +Places365_test_00222512.jpg +Places365_test_00222513.jpg +Places365_test_00222529.jpg +Places365_test_00222531.jpg +Places365_test_00222538.jpg +Places365_test_00222552.jpg +Places365_test_00222554.jpg +Places365_test_00222562.jpg +Places365_test_00222568.jpg +Places365_test_00222572.jpg +Places365_test_00222598.jpg +Places365_test_00222608.jpg +Places365_test_00222613.jpg +Places365_test_00222623.jpg +Places365_test_00222638.jpg +Places365_test_00222646.jpg +Places365_test_00222669.jpg +Places365_test_00222698.jpg +Places365_test_00222716.jpg +Places365_test_00222724.jpg +Places365_test_00222748.jpg +Places365_test_00222770.jpg +Places365_test_00222780.jpg +Places365_test_00222803.jpg +Places365_test_00222811.jpg +Places365_test_00222819.jpg +Places365_test_00222820.jpg +Places365_test_00222839.jpg +Places365_test_00222844.jpg +Places365_test_00222847.jpg +Places365_test_00222849.jpg +Places365_test_00222853.jpg +Places365_test_00222854.jpg +Places365_test_00222868.jpg +Places365_test_00222878.jpg +Places365_test_00222884.jpg +Places365_test_00222904.jpg +Places365_test_00222952.jpg +Places365_test_00222961.jpg +Places365_test_00222962.jpg +Places365_test_00222969.jpg +Places365_test_00222982.jpg +Places365_test_00222988.jpg +Places365_test_00222997.jpg +Places365_test_00223007.jpg +Places365_test_00223012.jpg +Places365_test_00223014.jpg +Places365_test_00223019.jpg +Places365_test_00223021.jpg +Places365_test_00223029.jpg +Places365_test_00223032.jpg +Places365_test_00223035.jpg +Places365_test_00223037.jpg +Places365_test_00223048.jpg +Places365_test_00223051.jpg +Places365_test_00223052.jpg 
+Places365_test_00223054.jpg +Places365_test_00223065.jpg +Places365_test_00223066.jpg +Places365_test_00223072.jpg +Places365_test_00223073.jpg +Places365_test_00223080.jpg +Places365_test_00223090.jpg +Places365_test_00223110.jpg +Places365_test_00223123.jpg +Places365_test_00223126.jpg +Places365_test_00223140.jpg +Places365_test_00223153.jpg +Places365_test_00223160.jpg +Places365_test_00223176.jpg +Places365_test_00223190.jpg +Places365_test_00223195.jpg +Places365_test_00223199.jpg +Places365_test_00223205.jpg +Places365_test_00223206.jpg +Places365_test_00223208.jpg +Places365_test_00223210.jpg +Places365_test_00223213.jpg +Places365_test_00223220.jpg +Places365_test_00223250.jpg +Places365_test_00223283.jpg +Places365_test_00223299.jpg +Places365_test_00223308.jpg +Places365_test_00223309.jpg +Places365_test_00223326.jpg +Places365_test_00223328.jpg +Places365_test_00223331.jpg +Places365_test_00223336.jpg +Places365_test_00223338.jpg +Places365_test_00223344.jpg +Places365_test_00223362.jpg +Places365_test_00223364.jpg +Places365_test_00223369.jpg +Places365_test_00223370.jpg +Places365_test_00223416.jpg +Places365_test_00223422.jpg +Places365_test_00223438.jpg +Places365_test_00223445.jpg +Places365_test_00223450.jpg +Places365_test_00223458.jpg +Places365_test_00223460.jpg +Places365_test_00223480.jpg +Places365_test_00223506.jpg +Places365_test_00223507.jpg +Places365_test_00223509.jpg +Places365_test_00223511.jpg +Places365_test_00223518.jpg +Places365_test_00223525.jpg +Places365_test_00223543.jpg +Places365_test_00223549.jpg +Places365_test_00223560.jpg +Places365_test_00223585.jpg +Places365_test_00223596.jpg +Places365_test_00223597.jpg +Places365_test_00223625.jpg +Places365_test_00223636.jpg +Places365_test_00223663.jpg +Places365_test_00223666.jpg +Places365_test_00223684.jpg +Places365_test_00223693.jpg +Places365_test_00223697.jpg +Places365_test_00223700.jpg +Places365_test_00223709.jpg +Places365_test_00223711.jpg +Places365_test_00223715.jpg +Places365_test_00223719.jpg +Places365_test_00223734.jpg +Places365_test_00223745.jpg +Places365_test_00223747.jpg +Places365_test_00223754.jpg +Places365_test_00223758.jpg +Places365_test_00223759.jpg +Places365_test_00223762.jpg +Places365_test_00223768.jpg +Places365_test_00223772.jpg +Places365_test_00223782.jpg +Places365_test_00223808.jpg +Places365_test_00223822.jpg +Places365_test_00223829.jpg +Places365_test_00223849.jpg +Places365_test_00223850.jpg +Places365_test_00223854.jpg +Places365_test_00223860.jpg +Places365_test_00223864.jpg +Places365_test_00223872.jpg +Places365_test_00223891.jpg +Places365_test_00223914.jpg +Places365_test_00223928.jpg +Places365_test_00223940.jpg +Places365_test_00223945.jpg +Places365_test_00223972.jpg +Places365_test_00223980.jpg +Places365_test_00223983.jpg +Places365_test_00223989.jpg +Places365_test_00224005.jpg +Places365_test_00224021.jpg +Places365_test_00224031.jpg +Places365_test_00224033.jpg +Places365_test_00224047.jpg +Places365_test_00224048.jpg +Places365_test_00224054.jpg +Places365_test_00224057.jpg +Places365_test_00224060.jpg +Places365_test_00224065.jpg +Places365_test_00224071.jpg +Places365_test_00224074.jpg +Places365_test_00224078.jpg +Places365_test_00224095.jpg +Places365_test_00224104.jpg +Places365_test_00224105.jpg +Places365_test_00224118.jpg +Places365_test_00224122.jpg +Places365_test_00224127.jpg +Places365_test_00224129.jpg +Places365_test_00224134.jpg +Places365_test_00224150.jpg +Places365_test_00224156.jpg +Places365_test_00224158.jpg 
+Places365_test_00224167.jpg +Places365_test_00224191.jpg +Places365_test_00224195.jpg +Places365_test_00224201.jpg +Places365_test_00224209.jpg +Places365_test_00224281.jpg +Places365_test_00224292.jpg +Places365_test_00224293.jpg +Places365_test_00224295.jpg +Places365_test_00224297.jpg +Places365_test_00224308.jpg +Places365_test_00224313.jpg +Places365_test_00224321.jpg +Places365_test_00224338.jpg +Places365_test_00224339.jpg +Places365_test_00224365.jpg +Places365_test_00224389.jpg +Places365_test_00224418.jpg +Places365_test_00224430.jpg +Places365_test_00224433.jpg +Places365_test_00224443.jpg +Places365_test_00224444.jpg +Places365_test_00224447.jpg +Places365_test_00224448.jpg +Places365_test_00224464.jpg +Places365_test_00224501.jpg +Places365_test_00224505.jpg +Places365_test_00224516.jpg +Places365_test_00224527.jpg +Places365_test_00224534.jpg +Places365_test_00224539.jpg +Places365_test_00224548.jpg +Places365_test_00224573.jpg +Places365_test_00224585.jpg +Places365_test_00224600.jpg +Places365_test_00224605.jpg +Places365_test_00224626.jpg +Places365_test_00224650.jpg +Places365_test_00224652.jpg +Places365_test_00224656.jpg +Places365_test_00224677.jpg +Places365_test_00224690.jpg +Places365_test_00224700.jpg +Places365_test_00224722.jpg +Places365_test_00224736.jpg +Places365_test_00224753.jpg +Places365_test_00224758.jpg +Places365_test_00224762.jpg +Places365_test_00224774.jpg +Places365_test_00224784.jpg +Places365_test_00224796.jpg +Places365_test_00224813.jpg +Places365_test_00224823.jpg +Places365_test_00224837.jpg +Places365_test_00224842.jpg +Places365_test_00224847.jpg +Places365_test_00224856.jpg +Places365_test_00224858.jpg +Places365_test_00224866.jpg +Places365_test_00224899.jpg +Places365_test_00224911.jpg +Places365_test_00224913.jpg +Places365_test_00224918.jpg +Places365_test_00224935.jpg +Places365_test_00224943.jpg +Places365_test_00224946.jpg +Places365_test_00224978.jpg +Places365_test_00224987.jpg +Places365_test_00225006.jpg +Places365_test_00225008.jpg +Places365_test_00225009.jpg +Places365_test_00225049.jpg +Places365_test_00225085.jpg +Places365_test_00225086.jpg +Places365_test_00225091.jpg +Places365_test_00225103.jpg +Places365_test_00225107.jpg +Places365_test_00225110.jpg +Places365_test_00225115.jpg +Places365_test_00225124.jpg +Places365_test_00225147.jpg +Places365_test_00225179.jpg +Places365_test_00225184.jpg +Places365_test_00225190.jpg +Places365_test_00225200.jpg +Places365_test_00225204.jpg +Places365_test_00225214.jpg +Places365_test_00225219.jpg +Places365_test_00225252.jpg +Places365_test_00225270.jpg +Places365_test_00225277.jpg +Places365_test_00225280.jpg +Places365_test_00225309.jpg +Places365_test_00225340.jpg +Places365_test_00225343.jpg +Places365_test_00225351.jpg +Places365_test_00225357.jpg +Places365_test_00225361.jpg +Places365_test_00225366.jpg +Places365_test_00225369.jpg +Places365_test_00225371.jpg +Places365_test_00225375.jpg +Places365_test_00225376.jpg +Places365_test_00225378.jpg +Places365_test_00225379.jpg +Places365_test_00225381.jpg +Places365_test_00225417.jpg +Places365_test_00225422.jpg +Places365_test_00225441.jpg +Places365_test_00225452.jpg +Places365_test_00225465.jpg +Places365_test_00225470.jpg +Places365_test_00225471.jpg +Places365_test_00225473.jpg +Places365_test_00225476.jpg +Places365_test_00225482.jpg +Places365_test_00225486.jpg +Places365_test_00225488.jpg +Places365_test_00225496.jpg +Places365_test_00225513.jpg +Places365_test_00225516.jpg +Places365_test_00225525.jpg 
+Places365_test_00225528.jpg +Places365_test_00225540.jpg +Places365_test_00225550.jpg +Places365_test_00225557.jpg +Places365_test_00225561.jpg +Places365_test_00225577.jpg +Places365_test_00225581.jpg +Places365_test_00225583.jpg +Places365_test_00225586.jpg +Places365_test_00225595.jpg +Places365_test_00225614.jpg +Places365_test_00225650.jpg +Places365_test_00225653.jpg +Places365_test_00225656.jpg +Places365_test_00225659.jpg +Places365_test_00225676.jpg +Places365_test_00225678.jpg +Places365_test_00225695.jpg +Places365_test_00225696.jpg +Places365_test_00225697.jpg +Places365_test_00225711.jpg +Places365_test_00225714.jpg +Places365_test_00225719.jpg +Places365_test_00225739.jpg +Places365_test_00225761.jpg +Places365_test_00225767.jpg +Places365_test_00225775.jpg +Places365_test_00225778.jpg +Places365_test_00225790.jpg +Places365_test_00225807.jpg +Places365_test_00225813.jpg +Places365_test_00225824.jpg +Places365_test_00225825.jpg +Places365_test_00225826.jpg +Places365_test_00225832.jpg +Places365_test_00225866.jpg +Places365_test_00225887.jpg +Places365_test_00225898.jpg +Places365_test_00225905.jpg +Places365_test_00225910.jpg +Places365_test_00225913.jpg +Places365_test_00225914.jpg +Places365_test_00225917.jpg +Places365_test_00225932.jpg +Places365_test_00225970.jpg +Places365_test_00225998.jpg +Places365_test_00226008.jpg +Places365_test_00226010.jpg +Places365_test_00226016.jpg +Places365_test_00226065.jpg +Places365_test_00226071.jpg +Places365_test_00226083.jpg +Places365_test_00226094.jpg +Places365_test_00226117.jpg +Places365_test_00226125.jpg +Places365_test_00226144.jpg +Places365_test_00226215.jpg +Places365_test_00226218.jpg +Places365_test_00226239.jpg +Places365_test_00226240.jpg +Places365_test_00226258.jpg +Places365_test_00226268.jpg +Places365_test_00226271.jpg +Places365_test_00226279.jpg +Places365_test_00226288.jpg +Places365_test_00226295.jpg +Places365_test_00226317.jpg +Places365_test_00226319.jpg +Places365_test_00226325.jpg +Places365_test_00226330.jpg +Places365_test_00226335.jpg +Places365_test_00226343.jpg +Places365_test_00226345.jpg +Places365_test_00226352.jpg +Places365_test_00226371.jpg +Places365_test_00226378.jpg +Places365_test_00226389.jpg +Places365_test_00226392.jpg +Places365_test_00226394.jpg +Places365_test_00226408.jpg +Places365_test_00226419.jpg +Places365_test_00226424.jpg +Places365_test_00226430.jpg +Places365_test_00226432.jpg +Places365_test_00226443.jpg +Places365_test_00226460.jpg +Places365_test_00226461.jpg +Places365_test_00226464.jpg +Places365_test_00226470.jpg +Places365_test_00226516.jpg +Places365_test_00226528.jpg +Places365_test_00226542.jpg +Places365_test_00226547.jpg +Places365_test_00226563.jpg +Places365_test_00226582.jpg +Places365_test_00226594.jpg +Places365_test_00226598.jpg +Places365_test_00226602.jpg +Places365_test_00226604.jpg +Places365_test_00226619.jpg +Places365_test_00226620.jpg +Places365_test_00226621.jpg +Places365_test_00226622.jpg +Places365_test_00226623.jpg +Places365_test_00226624.jpg +Places365_test_00226628.jpg +Places365_test_00226646.jpg +Places365_test_00226650.jpg +Places365_test_00226677.jpg +Places365_test_00226682.jpg +Places365_test_00226698.jpg +Places365_test_00226718.jpg +Places365_test_00226722.jpg +Places365_test_00226725.jpg +Places365_test_00226726.jpg +Places365_test_00226728.jpg +Places365_test_00226748.jpg +Places365_test_00226769.jpg +Places365_test_00226796.jpg +Places365_test_00226805.jpg +Places365_test_00226812.jpg +Places365_test_00226820.jpg 
+Places365_test_00226830.jpg +Places365_test_00226832.jpg +Places365_test_00226841.jpg +Places365_test_00226860.jpg +Places365_test_00226865.jpg +Places365_test_00226872.jpg +Places365_test_00226879.jpg +Places365_test_00226880.jpg +Places365_test_00226885.jpg +Places365_test_00226894.jpg +Places365_test_00226923.jpg +Places365_test_00226952.jpg +Places365_test_00226963.jpg +Places365_test_00226976.jpg +Places365_test_00226986.jpg +Places365_test_00226987.jpg +Places365_test_00226999.jpg +Places365_test_00227003.jpg +Places365_test_00227008.jpg +Places365_test_00227011.jpg +Places365_test_00227039.jpg +Places365_test_00227054.jpg +Places365_test_00227086.jpg +Places365_test_00227095.jpg +Places365_test_00227101.jpg +Places365_test_00227119.jpg +Places365_test_00227127.jpg +Places365_test_00227137.jpg +Places365_test_00227153.jpg +Places365_test_00227155.jpg +Places365_test_00227165.jpg +Places365_test_00227168.jpg +Places365_test_00227175.jpg +Places365_test_00227195.jpg +Places365_test_00227197.jpg +Places365_test_00227199.jpg +Places365_test_00227206.jpg +Places365_test_00227212.jpg +Places365_test_00227216.jpg +Places365_test_00227233.jpg +Places365_test_00227253.jpg +Places365_test_00227265.jpg +Places365_test_00227298.jpg +Places365_test_00227313.jpg +Places365_test_00227317.jpg +Places365_test_00227318.jpg +Places365_test_00227323.jpg +Places365_test_00227325.jpg +Places365_test_00227333.jpg +Places365_test_00227349.jpg +Places365_test_00227354.jpg +Places365_test_00227362.jpg +Places365_test_00227363.jpg +Places365_test_00227364.jpg +Places365_test_00227368.jpg +Places365_test_00227393.jpg +Places365_test_00227395.jpg +Places365_test_00227406.jpg +Places365_test_00227416.jpg +Places365_test_00227436.jpg +Places365_test_00227438.jpg +Places365_test_00227439.jpg +Places365_test_00227453.jpg +Places365_test_00227455.jpg +Places365_test_00227490.jpg +Places365_test_00227491.jpg +Places365_test_00227500.jpg +Places365_test_00227549.jpg +Places365_test_00227558.jpg +Places365_test_00227569.jpg +Places365_test_00227590.jpg +Places365_test_00227604.jpg +Places365_test_00227607.jpg +Places365_test_00227608.jpg +Places365_test_00227636.jpg +Places365_test_00227638.jpg +Places365_test_00227642.jpg +Places365_test_00227645.jpg +Places365_test_00227650.jpg +Places365_test_00227656.jpg +Places365_test_00227661.jpg +Places365_test_00227695.jpg +Places365_test_00227696.jpg +Places365_test_00227700.jpg +Places365_test_00227702.jpg +Places365_test_00227709.jpg +Places365_test_00227711.jpg +Places365_test_00227716.jpg +Places365_test_00227718.jpg +Places365_test_00227727.jpg +Places365_test_00227735.jpg +Places365_test_00227747.jpg +Places365_test_00227761.jpg +Places365_test_00227772.jpg +Places365_test_00227777.jpg +Places365_test_00227779.jpg +Places365_test_00227783.jpg +Places365_test_00227793.jpg +Places365_test_00227810.jpg +Places365_test_00227812.jpg +Places365_test_00227819.jpg +Places365_test_00227823.jpg +Places365_test_00227836.jpg +Places365_test_00227839.jpg +Places365_test_00227840.jpg +Places365_test_00227854.jpg +Places365_test_00227891.jpg +Places365_test_00227904.jpg +Places365_test_00227907.jpg +Places365_test_00227924.jpg +Places365_test_00227927.jpg +Places365_test_00227935.jpg +Places365_test_00227938.jpg +Places365_test_00227953.jpg +Places365_test_00227961.jpg +Places365_test_00227985.jpg +Places365_test_00228001.jpg +Places365_test_00228012.jpg +Places365_test_00228013.jpg +Places365_test_00228027.jpg +Places365_test_00228029.jpg +Places365_test_00228039.jpg 
+Places365_test_00228041.jpg +Places365_test_00228054.jpg +Places365_test_00228060.jpg +Places365_test_00228063.jpg +Places365_test_00228081.jpg +Places365_test_00228094.jpg +Places365_test_00228106.jpg +Places365_test_00228108.jpg +Places365_test_00228111.jpg +Places365_test_00228156.jpg +Places365_test_00228172.jpg +Places365_test_00228175.jpg +Places365_test_00228176.jpg +Places365_test_00228193.jpg +Places365_test_00228200.jpg +Places365_test_00228204.jpg +Places365_test_00228210.jpg +Places365_test_00228215.jpg +Places365_test_00228226.jpg +Places365_test_00228233.jpg +Places365_test_00228234.jpg +Places365_test_00228241.jpg +Places365_test_00228246.jpg +Places365_test_00228248.jpg +Places365_test_00228253.jpg +Places365_test_00228256.jpg +Places365_test_00228257.jpg +Places365_test_00228286.jpg +Places365_test_00228299.jpg +Places365_test_00228301.jpg +Places365_test_00228306.jpg +Places365_test_00228310.jpg +Places365_test_00228314.jpg +Places365_test_00228316.jpg +Places365_test_00228318.jpg +Places365_test_00228322.jpg +Places365_test_00228334.jpg +Places365_test_00228340.jpg +Places365_test_00228346.jpg +Places365_test_00228356.jpg +Places365_test_00228363.jpg +Places365_test_00228364.jpg +Places365_test_00228378.jpg +Places365_test_00228386.jpg +Places365_test_00228401.jpg +Places365_test_00228414.jpg +Places365_test_00228429.jpg +Places365_test_00228444.jpg +Places365_test_00228452.jpg +Places365_test_00228467.jpg +Places365_test_00228492.jpg +Places365_test_00228506.jpg +Places365_test_00228508.jpg +Places365_test_00228548.jpg +Places365_test_00228557.jpg +Places365_test_00228564.jpg +Places365_test_00228569.jpg +Places365_test_00228578.jpg +Places365_test_00228582.jpg +Places365_test_00228583.jpg +Places365_test_00228596.jpg +Places365_test_00228623.jpg +Places365_test_00228647.jpg +Places365_test_00228670.jpg +Places365_test_00228711.jpg +Places365_test_00228722.jpg +Places365_test_00228723.jpg +Places365_test_00228730.jpg +Places365_test_00228733.jpg +Places365_test_00228734.jpg +Places365_test_00228749.jpg +Places365_test_00228765.jpg +Places365_test_00228766.jpg +Places365_test_00228778.jpg +Places365_test_00228790.jpg +Places365_test_00228827.jpg +Places365_test_00228843.jpg +Places365_test_00228855.jpg +Places365_test_00228901.jpg +Places365_test_00228923.jpg +Places365_test_00228927.jpg +Places365_test_00228936.jpg +Places365_test_00228940.jpg +Places365_test_00228942.jpg +Places365_test_00228953.jpg +Places365_test_00228965.jpg +Places365_test_00228967.jpg +Places365_test_00228979.jpg +Places365_test_00228986.jpg +Places365_test_00228991.jpg +Places365_test_00228996.jpg +Places365_test_00229006.jpg +Places365_test_00229010.jpg +Places365_test_00229013.jpg +Places365_test_00229019.jpg +Places365_test_00229027.jpg +Places365_test_00229042.jpg +Places365_test_00229062.jpg +Places365_test_00229083.jpg +Places365_test_00229095.jpg +Places365_test_00229107.jpg +Places365_test_00229125.jpg +Places365_test_00229126.jpg +Places365_test_00229127.jpg +Places365_test_00229134.jpg +Places365_test_00229142.jpg +Places365_test_00229148.jpg +Places365_test_00229156.jpg +Places365_test_00229162.jpg +Places365_test_00229176.jpg +Places365_test_00229180.jpg +Places365_test_00229181.jpg +Places365_test_00229186.jpg +Places365_test_00229194.jpg +Places365_test_00229196.jpg +Places365_test_00229217.jpg +Places365_test_00229219.jpg +Places365_test_00229243.jpg +Places365_test_00229251.jpg +Places365_test_00229276.jpg +Places365_test_00229282.jpg +Places365_test_00229292.jpg 
+Places365_test_00229305.jpg +Places365_test_00229307.jpg +Places365_test_00229309.jpg +Places365_test_00229313.jpg +Places365_test_00229320.jpg +Places365_test_00229323.jpg +Places365_test_00229336.jpg +Places365_test_00229338.jpg +Places365_test_00229352.jpg +Places365_test_00229357.jpg +Places365_test_00229367.jpg +Places365_test_00229375.jpg +Places365_test_00229384.jpg +Places365_test_00229389.jpg +Places365_test_00229394.jpg +Places365_test_00229395.jpg +Places365_test_00229396.jpg +Places365_test_00229416.jpg +Places365_test_00229450.jpg +Places365_test_00229452.jpg +Places365_test_00229458.jpg +Places365_test_00229463.jpg +Places365_test_00229479.jpg +Places365_test_00229488.jpg +Places365_test_00229514.jpg +Places365_test_00229528.jpg +Places365_test_00229529.jpg +Places365_test_00229534.jpg +Places365_test_00229558.jpg +Places365_test_00229603.jpg +Places365_test_00229612.jpg +Places365_test_00229630.jpg +Places365_test_00229635.jpg +Places365_test_00229655.jpg +Places365_test_00229663.jpg +Places365_test_00229676.jpg +Places365_test_00229695.jpg +Places365_test_00229700.jpg +Places365_test_00229707.jpg +Places365_test_00229732.jpg +Places365_test_00229739.jpg +Places365_test_00229744.jpg +Places365_test_00229747.jpg +Places365_test_00229763.jpg +Places365_test_00229773.jpg +Places365_test_00229774.jpg +Places365_test_00229775.jpg +Places365_test_00229777.jpg +Places365_test_00229789.jpg +Places365_test_00229798.jpg +Places365_test_00229808.jpg +Places365_test_00229818.jpg +Places365_test_00229825.jpg +Places365_test_00229833.jpg +Places365_test_00229847.jpg +Places365_test_00229852.jpg +Places365_test_00229856.jpg +Places365_test_00229862.jpg +Places365_test_00229872.jpg +Places365_test_00229901.jpg +Places365_test_00229911.jpg +Places365_test_00229949.jpg +Places365_test_00229952.jpg +Places365_test_00229954.jpg +Places365_test_00229963.jpg +Places365_test_00229969.jpg +Places365_test_00229974.jpg +Places365_test_00229994.jpg +Places365_test_00230001.jpg +Places365_test_00230009.jpg +Places365_test_00230022.jpg +Places365_test_00230074.jpg +Places365_test_00230094.jpg +Places365_test_00230110.jpg +Places365_test_00230115.jpg +Places365_test_00230133.jpg +Places365_test_00230138.jpg +Places365_test_00230140.jpg +Places365_test_00230145.jpg +Places365_test_00230149.jpg +Places365_test_00230172.jpg +Places365_test_00230180.jpg +Places365_test_00230186.jpg +Places365_test_00230190.jpg +Places365_test_00230200.jpg +Places365_test_00230212.jpg +Places365_test_00230220.jpg +Places365_test_00230243.jpg +Places365_test_00230251.jpg +Places365_test_00230263.jpg +Places365_test_00230280.jpg +Places365_test_00230284.jpg +Places365_test_00230295.jpg +Places365_test_00230302.jpg +Places365_test_00230305.jpg +Places365_test_00230314.jpg +Places365_test_00230328.jpg +Places365_test_00230338.jpg +Places365_test_00230390.jpg +Places365_test_00230399.jpg +Places365_test_00230404.jpg +Places365_test_00230409.jpg +Places365_test_00230420.jpg +Places365_test_00230428.jpg +Places365_test_00230453.jpg +Places365_test_00230460.jpg +Places365_test_00230465.jpg +Places365_test_00230469.jpg +Places365_test_00230476.jpg +Places365_test_00230509.jpg +Places365_test_00230513.jpg +Places365_test_00230561.jpg +Places365_test_00230584.jpg +Places365_test_00230590.jpg +Places365_test_00230594.jpg +Places365_test_00230600.jpg +Places365_test_00230640.jpg +Places365_test_00230646.jpg +Places365_test_00230649.jpg +Places365_test_00230681.jpg +Places365_test_00230684.jpg +Places365_test_00230720.jpg 
+Places365_test_00230732.jpg +Places365_test_00230748.jpg +Places365_test_00230753.jpg +Places365_test_00230757.jpg +Places365_test_00230760.jpg +Places365_test_00230763.jpg +Places365_test_00230768.jpg +Places365_test_00230769.jpg +Places365_test_00230772.jpg +Places365_test_00230777.jpg +Places365_test_00230786.jpg +Places365_test_00230788.jpg +Places365_test_00230790.jpg +Places365_test_00230801.jpg +Places365_test_00230807.jpg +Places365_test_00230820.jpg +Places365_test_00230866.jpg +Places365_test_00230886.jpg +Places365_test_00230890.jpg +Places365_test_00230911.jpg +Places365_test_00230922.jpg +Places365_test_00230923.jpg +Places365_test_00230931.jpg +Places365_test_00230936.jpg +Places365_test_00230945.jpg +Places365_test_00230954.jpg +Places365_test_00230969.jpg +Places365_test_00230972.jpg +Places365_test_00230976.jpg +Places365_test_00230978.jpg +Places365_test_00230987.jpg +Places365_test_00230988.jpg +Places365_test_00230994.jpg +Places365_test_00231005.jpg +Places365_test_00231013.jpg +Places365_test_00231029.jpg +Places365_test_00231033.jpg +Places365_test_00231035.jpg +Places365_test_00231039.jpg +Places365_test_00231062.jpg +Places365_test_00231069.jpg +Places365_test_00231097.jpg +Places365_test_00231115.jpg +Places365_test_00231130.jpg +Places365_test_00231136.jpg +Places365_test_00231142.jpg +Places365_test_00231144.jpg +Places365_test_00231153.jpg +Places365_test_00231169.jpg +Places365_test_00231188.jpg +Places365_test_00231216.jpg +Places365_test_00231233.jpg +Places365_test_00231250.jpg +Places365_test_00231269.jpg +Places365_test_00231346.jpg +Places365_test_00231367.jpg +Places365_test_00231379.jpg +Places365_test_00231395.jpg +Places365_test_00231401.jpg +Places365_test_00231411.jpg +Places365_test_00231413.jpg +Places365_test_00231431.jpg +Places365_test_00231436.jpg +Places365_test_00231441.jpg +Places365_test_00231442.jpg +Places365_test_00231449.jpg +Places365_test_00231455.jpg +Places365_test_00231473.jpg +Places365_test_00231477.jpg +Places365_test_00231494.jpg +Places365_test_00231495.jpg +Places365_test_00231513.jpg +Places365_test_00231520.jpg +Places365_test_00231550.jpg +Places365_test_00231561.jpg +Places365_test_00231578.jpg +Places365_test_00231582.jpg +Places365_test_00231589.jpg +Places365_test_00231597.jpg +Places365_test_00231600.jpg +Places365_test_00231606.jpg +Places365_test_00231624.jpg +Places365_test_00231647.jpg +Places365_test_00231660.jpg +Places365_test_00231665.jpg +Places365_test_00231677.jpg +Places365_test_00231688.jpg +Places365_test_00231698.jpg +Places365_test_00231708.jpg +Places365_test_00231709.jpg +Places365_test_00231712.jpg +Places365_test_00231713.jpg +Places365_test_00231718.jpg +Places365_test_00231729.jpg +Places365_test_00231730.jpg +Places365_test_00231750.jpg +Places365_test_00231754.jpg +Places365_test_00231757.jpg +Places365_test_00231763.jpg +Places365_test_00231765.jpg +Places365_test_00231771.jpg +Places365_test_00231780.jpg +Places365_test_00231781.jpg +Places365_test_00231791.jpg +Places365_test_00231793.jpg +Places365_test_00231804.jpg +Places365_test_00231809.jpg +Places365_test_00231825.jpg +Places365_test_00231830.jpg +Places365_test_00231835.jpg +Places365_test_00231838.jpg +Places365_test_00231847.jpg +Places365_test_00231848.jpg +Places365_test_00231852.jpg +Places365_test_00231853.jpg +Places365_test_00231858.jpg +Places365_test_00231879.jpg +Places365_test_00231889.jpg +Places365_test_00231927.jpg +Places365_test_00231930.jpg +Places365_test_00231937.jpg +Places365_test_00231943.jpg 
+Places365_test_00231947.jpg +Places365_test_00231990.jpg +Places365_test_00231998.jpg +Places365_test_00232001.jpg +Places365_test_00232004.jpg +Places365_test_00232008.jpg +Places365_test_00232009.jpg +Places365_test_00232037.jpg +Places365_test_00232048.jpg +Places365_test_00232088.jpg +Places365_test_00232096.jpg +Places365_test_00232104.jpg +Places365_test_00232111.jpg +Places365_test_00232119.jpg +Places365_test_00232122.jpg +Places365_test_00232124.jpg +Places365_test_00232126.jpg +Places365_test_00232138.jpg +Places365_test_00232144.jpg +Places365_test_00232147.jpg +Places365_test_00232162.jpg +Places365_test_00232179.jpg +Places365_test_00232180.jpg +Places365_test_00232188.jpg +Places365_test_00232190.jpg +Places365_test_00232200.jpg +Places365_test_00232206.jpg +Places365_test_00232209.jpg +Places365_test_00232211.jpg +Places365_test_00232212.jpg +Places365_test_00232233.jpg +Places365_test_00232276.jpg +Places365_test_00232290.jpg +Places365_test_00232300.jpg +Places365_test_00232320.jpg +Places365_test_00232330.jpg +Places365_test_00232343.jpg +Places365_test_00232356.jpg +Places365_test_00232361.jpg +Places365_test_00232374.jpg +Places365_test_00232375.jpg +Places365_test_00232392.jpg +Places365_test_00232406.jpg +Places365_test_00232417.jpg +Places365_test_00232423.jpg +Places365_test_00232440.jpg +Places365_test_00232443.jpg +Places365_test_00232449.jpg +Places365_test_00232452.jpg +Places365_test_00232459.jpg +Places365_test_00232469.jpg +Places365_test_00232487.jpg +Places365_test_00232537.jpg +Places365_test_00232545.jpg +Places365_test_00232560.jpg +Places365_test_00232570.jpg +Places365_test_00232611.jpg +Places365_test_00232626.jpg +Places365_test_00232630.jpg +Places365_test_00232634.jpg +Places365_test_00232636.jpg +Places365_test_00232648.jpg +Places365_test_00232653.jpg +Places365_test_00232654.jpg +Places365_test_00232672.jpg +Places365_test_00232675.jpg +Places365_test_00232676.jpg +Places365_test_00232689.jpg +Places365_test_00232699.jpg +Places365_test_00232711.jpg +Places365_test_00232718.jpg +Places365_test_00232725.jpg +Places365_test_00232727.jpg +Places365_test_00232739.jpg +Places365_test_00232757.jpg +Places365_test_00232764.jpg +Places365_test_00232789.jpg +Places365_test_00232790.jpg +Places365_test_00232795.jpg +Places365_test_00232800.jpg +Places365_test_00232812.jpg +Places365_test_00232813.jpg +Places365_test_00232819.jpg +Places365_test_00232840.jpg +Places365_test_00232846.jpg +Places365_test_00232855.jpg +Places365_test_00232872.jpg +Places365_test_00232904.jpg +Places365_test_00232905.jpg +Places365_test_00232917.jpg +Places365_test_00232924.jpg +Places365_test_00232926.jpg +Places365_test_00232937.jpg +Places365_test_00232947.jpg +Places365_test_00232955.jpg +Places365_test_00232965.jpg +Places365_test_00232975.jpg +Places365_test_00232979.jpg +Places365_test_00232986.jpg +Places365_test_00232998.jpg +Places365_test_00233023.jpg +Places365_test_00233029.jpg +Places365_test_00233033.jpg +Places365_test_00233035.jpg +Places365_test_00233041.jpg +Places365_test_00233059.jpg +Places365_test_00233065.jpg +Places365_test_00233068.jpg +Places365_test_00233085.jpg +Places365_test_00233092.jpg +Places365_test_00233096.jpg +Places365_test_00233100.jpg +Places365_test_00233114.jpg +Places365_test_00233118.jpg +Places365_test_00233120.jpg +Places365_test_00233156.jpg +Places365_test_00233157.jpg +Places365_test_00233163.jpg +Places365_test_00233167.jpg +Places365_test_00233173.jpg +Places365_test_00233185.jpg +Places365_test_00233194.jpg 
+Places365_test_00233200.jpg +Places365_test_00233205.jpg +Places365_test_00233239.jpg +Places365_test_00233241.jpg +Places365_test_00233265.jpg +Places365_test_00233281.jpg +Places365_test_00233287.jpg +Places365_test_00233296.jpg +Places365_test_00233299.jpg +Places365_test_00233315.jpg +Places365_test_00233322.jpg +Places365_test_00233327.jpg +Places365_test_00233331.jpg +Places365_test_00233346.jpg +Places365_test_00233350.jpg +Places365_test_00233353.jpg +Places365_test_00233359.jpg +Places365_test_00233362.jpg +Places365_test_00233371.jpg +Places365_test_00233372.jpg +Places365_test_00233385.jpg +Places365_test_00233396.jpg +Places365_test_00233405.jpg +Places365_test_00233406.jpg +Places365_test_00233408.jpg +Places365_test_00233411.jpg +Places365_test_00233423.jpg +Places365_test_00233432.jpg +Places365_test_00233453.jpg +Places365_test_00233468.jpg +Places365_test_00233471.jpg +Places365_test_00233489.jpg +Places365_test_00233512.jpg +Places365_test_00233516.jpg +Places365_test_00233523.jpg +Places365_test_00233537.jpg +Places365_test_00233540.jpg +Places365_test_00233548.jpg +Places365_test_00233559.jpg +Places365_test_00233567.jpg +Places365_test_00233572.jpg +Places365_test_00233573.jpg +Places365_test_00233580.jpg +Places365_test_00233582.jpg +Places365_test_00233597.jpg +Places365_test_00233598.jpg +Places365_test_00233604.jpg +Places365_test_00233608.jpg +Places365_test_00233611.jpg +Places365_test_00233618.jpg +Places365_test_00233620.jpg +Places365_test_00233635.jpg +Places365_test_00233637.jpg +Places365_test_00233638.jpg +Places365_test_00233639.jpg +Places365_test_00233640.jpg +Places365_test_00233642.jpg +Places365_test_00233644.jpg +Places365_test_00233687.jpg +Places365_test_00233689.jpg +Places365_test_00233697.jpg +Places365_test_00233698.jpg +Places365_test_00233705.jpg +Places365_test_00233709.jpg +Places365_test_00233732.jpg +Places365_test_00233733.jpg +Places365_test_00233758.jpg +Places365_test_00233767.jpg +Places365_test_00233770.jpg +Places365_test_00233778.jpg +Places365_test_00233780.jpg +Places365_test_00233783.jpg +Places365_test_00233796.jpg +Places365_test_00233798.jpg +Places365_test_00233806.jpg +Places365_test_00233818.jpg +Places365_test_00233819.jpg +Places365_test_00233823.jpg +Places365_test_00233831.jpg +Places365_test_00233832.jpg +Places365_test_00233869.jpg +Places365_test_00233873.jpg +Places365_test_00233882.jpg +Places365_test_00233913.jpg +Places365_test_00233956.jpg +Places365_test_00233965.jpg +Places365_test_00233967.jpg +Places365_test_00233972.jpg +Places365_test_00233983.jpg +Places365_test_00233991.jpg +Places365_test_00234010.jpg +Places365_test_00234040.jpg +Places365_test_00234049.jpg +Places365_test_00234060.jpg +Places365_test_00234090.jpg +Places365_test_00234097.jpg +Places365_test_00234105.jpg +Places365_test_00234123.jpg +Places365_test_00234129.jpg +Places365_test_00234142.jpg +Places365_test_00234143.jpg +Places365_test_00234148.jpg +Places365_test_00234154.jpg +Places365_test_00234171.jpg +Places365_test_00234179.jpg +Places365_test_00234188.jpg +Places365_test_00234193.jpg +Places365_test_00234217.jpg +Places365_test_00234225.jpg +Places365_test_00234233.jpg +Places365_test_00234235.jpg +Places365_test_00234246.jpg +Places365_test_00234247.jpg +Places365_test_00234279.jpg +Places365_test_00234286.jpg +Places365_test_00234291.jpg +Places365_test_00234299.jpg +Places365_test_00234300.jpg +Places365_test_00234303.jpg +Places365_test_00234304.jpg +Places365_test_00234305.jpg +Places365_test_00234318.jpg 
+Places365_test_00234332.jpg +Places365_test_00234342.jpg +Places365_test_00234349.jpg +Places365_test_00234357.jpg +Places365_test_00234364.jpg +Places365_test_00234378.jpg +Places365_test_00234382.jpg +Places365_test_00234398.jpg +Places365_test_00234441.jpg +Places365_test_00234451.jpg +Places365_test_00234457.jpg +Places365_test_00234470.jpg +Places365_test_00234476.jpg +Places365_test_00234496.jpg +Places365_test_00234506.jpg +Places365_test_00234507.jpg +Places365_test_00234519.jpg +Places365_test_00234542.jpg +Places365_test_00234544.jpg +Places365_test_00234556.jpg +Places365_test_00234582.jpg +Places365_test_00234583.jpg +Places365_test_00234593.jpg +Places365_test_00234621.jpg +Places365_test_00234626.jpg +Places365_test_00234634.jpg +Places365_test_00234639.jpg +Places365_test_00234647.jpg +Places365_test_00234661.jpg +Places365_test_00234662.jpg +Places365_test_00234677.jpg +Places365_test_00234702.jpg +Places365_test_00234704.jpg +Places365_test_00234714.jpg +Places365_test_00234717.jpg +Places365_test_00234724.jpg +Places365_test_00234736.jpg +Places365_test_00234741.jpg +Places365_test_00234749.jpg +Places365_test_00234773.jpg +Places365_test_00234791.jpg +Places365_test_00234820.jpg +Places365_test_00234831.jpg +Places365_test_00234836.jpg +Places365_test_00234837.jpg +Places365_test_00234850.jpg +Places365_test_00234854.jpg +Places365_test_00234883.jpg +Places365_test_00234892.jpg +Places365_test_00234902.jpg +Places365_test_00234913.jpg +Places365_test_00234914.jpg +Places365_test_00234915.jpg +Places365_test_00234930.jpg +Places365_test_00234939.jpg +Places365_test_00234942.jpg +Places365_test_00234948.jpg +Places365_test_00234951.jpg +Places365_test_00234954.jpg +Places365_test_00234980.jpg +Places365_test_00235006.jpg +Places365_test_00235016.jpg +Places365_test_00235019.jpg +Places365_test_00235030.jpg +Places365_test_00235037.jpg +Places365_test_00235038.jpg +Places365_test_00235053.jpg +Places365_test_00235068.jpg +Places365_test_00235073.jpg +Places365_test_00235075.jpg +Places365_test_00235077.jpg +Places365_test_00235137.jpg +Places365_test_00235159.jpg +Places365_test_00235189.jpg +Places365_test_00235208.jpg +Places365_test_00235219.jpg +Places365_test_00235232.jpg +Places365_test_00235234.jpg +Places365_test_00235239.jpg +Places365_test_00235250.jpg +Places365_test_00235257.jpg +Places365_test_00235268.jpg +Places365_test_00235288.jpg +Places365_test_00235290.jpg +Places365_test_00235309.jpg +Places365_test_00235340.jpg +Places365_test_00235356.jpg +Places365_test_00235370.jpg +Places365_test_00235397.jpg +Places365_test_00235400.jpg +Places365_test_00235404.jpg +Places365_test_00235406.jpg +Places365_test_00235429.jpg +Places365_test_00235434.jpg +Places365_test_00235439.jpg +Places365_test_00235446.jpg +Places365_test_00235453.jpg +Places365_test_00235464.jpg +Places365_test_00235470.jpg +Places365_test_00235473.jpg +Places365_test_00235475.jpg +Places365_test_00235485.jpg +Places365_test_00235486.jpg +Places365_test_00235494.jpg +Places365_test_00235499.jpg +Places365_test_00235504.jpg +Places365_test_00235524.jpg +Places365_test_00235531.jpg +Places365_test_00235554.jpg +Places365_test_00235569.jpg +Places365_test_00235571.jpg +Places365_test_00235576.jpg +Places365_test_00235585.jpg +Places365_test_00235606.jpg +Places365_test_00235623.jpg +Places365_test_00235625.jpg +Places365_test_00235626.jpg +Places365_test_00235634.jpg +Places365_test_00235659.jpg +Places365_test_00235664.jpg +Places365_test_00235685.jpg +Places365_test_00235686.jpg 
+Places365_test_00235688.jpg +Places365_test_00235718.jpg +Places365_test_00235720.jpg +Places365_test_00235748.jpg +Places365_test_00235764.jpg +Places365_test_00235769.jpg +Places365_test_00235777.jpg +Places365_test_00235779.jpg +Places365_test_00235782.jpg +Places365_test_00235784.jpg +Places365_test_00235798.jpg +Places365_test_00235802.jpg +Places365_test_00235817.jpg +Places365_test_00235831.jpg +Places365_test_00235837.jpg +Places365_test_00235857.jpg +Places365_test_00235871.jpg +Places365_test_00235875.jpg +Places365_test_00235917.jpg +Places365_test_00235932.jpg +Places365_test_00235970.jpg +Places365_test_00236010.jpg +Places365_test_00236011.jpg +Places365_test_00236014.jpg +Places365_test_00236020.jpg +Places365_test_00236024.jpg +Places365_test_00236050.jpg +Places365_test_00236052.jpg +Places365_test_00236057.jpg +Places365_test_00236058.jpg +Places365_test_00236072.jpg +Places365_test_00236093.jpg +Places365_test_00236098.jpg +Places365_test_00236105.jpg +Places365_test_00236114.jpg +Places365_test_00236120.jpg +Places365_test_00236124.jpg +Places365_test_00236133.jpg +Places365_test_00236150.jpg +Places365_test_00236151.jpg +Places365_test_00236152.jpg +Places365_test_00236161.jpg +Places365_test_00236169.jpg +Places365_test_00236170.jpg +Places365_test_00236209.jpg +Places365_test_00236212.jpg +Places365_test_00236230.jpg +Places365_test_00236253.jpg +Places365_test_00236265.jpg +Places365_test_00236267.jpg +Places365_test_00236272.jpg +Places365_test_00236279.jpg +Places365_test_00236284.jpg +Places365_test_00236285.jpg +Places365_test_00236297.jpg +Places365_test_00236305.jpg +Places365_test_00236318.jpg +Places365_test_00236339.jpg +Places365_test_00236343.jpg +Places365_test_00236350.jpg +Places365_test_00236353.jpg +Places365_test_00236357.jpg +Places365_test_00236368.jpg +Places365_test_00236372.jpg +Places365_test_00236374.jpg +Places365_test_00236375.jpg +Places365_test_00236382.jpg +Places365_test_00236420.jpg +Places365_test_00236429.jpg +Places365_test_00236432.jpg +Places365_test_00236433.jpg +Places365_test_00236456.jpg +Places365_test_00236458.jpg +Places365_test_00236459.jpg +Places365_test_00236474.jpg +Places365_test_00236477.jpg +Places365_test_00236480.jpg +Places365_test_00236506.jpg +Places365_test_00236529.jpg +Places365_test_00236532.jpg +Places365_test_00236552.jpg +Places365_test_00236554.jpg +Places365_test_00236565.jpg +Places365_test_00236567.jpg +Places365_test_00236583.jpg +Places365_test_00236584.jpg +Places365_test_00236594.jpg +Places365_test_00236596.jpg +Places365_test_00236609.jpg +Places365_test_00236632.jpg +Places365_test_00236648.jpg +Places365_test_00236650.jpg +Places365_test_00236695.jpg +Places365_test_00236731.jpg +Places365_test_00236738.jpg +Places365_test_00236753.jpg +Places365_test_00236763.jpg +Places365_test_00236769.jpg +Places365_test_00236773.jpg +Places365_test_00236777.jpg +Places365_test_00236785.jpg +Places365_test_00236799.jpg +Places365_test_00236806.jpg +Places365_test_00236810.jpg +Places365_test_00236814.jpg +Places365_test_00236823.jpg +Places365_test_00236845.jpg +Places365_test_00236846.jpg +Places365_test_00236848.jpg +Places365_test_00236873.jpg +Places365_test_00236888.jpg +Places365_test_00236909.jpg +Places365_test_00236917.jpg +Places365_test_00236926.jpg +Places365_test_00236949.jpg +Places365_test_00236957.jpg +Places365_test_00236971.jpg +Places365_test_00236985.jpg +Places365_test_00236987.jpg +Places365_test_00236991.jpg +Places365_test_00237000.jpg +Places365_test_00237016.jpg 
+Places365_test_00237022.jpg +Places365_test_00237025.jpg +Places365_test_00237027.jpg +Places365_test_00237045.jpg +Places365_test_00237058.jpg +Places365_test_00237075.jpg +Places365_test_00237088.jpg +Places365_test_00237108.jpg +Places365_test_00237123.jpg +Places365_test_00237129.jpg +Places365_test_00237135.jpg +Places365_test_00237179.jpg +Places365_test_00237182.jpg +Places365_test_00237188.jpg +Places365_test_00237191.jpg +Places365_test_00237206.jpg +Places365_test_00237222.jpg +Places365_test_00237232.jpg +Places365_test_00237253.jpg +Places365_test_00237254.jpg +Places365_test_00237266.jpg +Places365_test_00237273.jpg +Places365_test_00237287.jpg +Places365_test_00237291.jpg +Places365_test_00237296.jpg +Places365_test_00237297.jpg +Places365_test_00237300.jpg +Places365_test_00237302.jpg +Places365_test_00237314.jpg +Places365_test_00237316.jpg +Places365_test_00237328.jpg +Places365_test_00237351.jpg +Places365_test_00237365.jpg +Places365_test_00237370.jpg +Places365_test_00237373.jpg +Places365_test_00237390.jpg +Places365_test_00237393.jpg +Places365_test_00237397.jpg +Places365_test_00237405.jpg +Places365_test_00237436.jpg +Places365_test_00237437.jpg +Places365_test_00237440.jpg +Places365_test_00237450.jpg +Places365_test_00237458.jpg +Places365_test_00237464.jpg +Places365_test_00237468.jpg +Places365_test_00237472.jpg +Places365_test_00237494.jpg +Places365_test_00237499.jpg +Places365_test_00237501.jpg +Places365_test_00237508.jpg +Places365_test_00237521.jpg +Places365_test_00237526.jpg +Places365_test_00237561.jpg +Places365_test_00237566.jpg +Places365_test_00237575.jpg +Places365_test_00237578.jpg +Places365_test_00237584.jpg +Places365_test_00237607.jpg +Places365_test_00237616.jpg +Places365_test_00237623.jpg +Places365_test_00237637.jpg +Places365_test_00237665.jpg +Places365_test_00237671.jpg +Places365_test_00237680.jpg +Places365_test_00237696.jpg +Places365_test_00237701.jpg +Places365_test_00237702.jpg +Places365_test_00237713.jpg +Places365_test_00237725.jpg +Places365_test_00237732.jpg +Places365_test_00237739.jpg +Places365_test_00237749.jpg +Places365_test_00237759.jpg +Places365_test_00237760.jpg +Places365_test_00237769.jpg +Places365_test_00237776.jpg +Places365_test_00237796.jpg +Places365_test_00237798.jpg +Places365_test_00237802.jpg +Places365_test_00237825.jpg +Places365_test_00237856.jpg +Places365_test_00237887.jpg +Places365_test_00237904.jpg +Places365_test_00237921.jpg +Places365_test_00237946.jpg +Places365_test_00237958.jpg +Places365_test_00237966.jpg +Places365_test_00237973.jpg +Places365_test_00237988.jpg +Places365_test_00238002.jpg +Places365_test_00238008.jpg +Places365_test_00238014.jpg +Places365_test_00238037.jpg +Places365_test_00238070.jpg +Places365_test_00238073.jpg +Places365_test_00238076.jpg +Places365_test_00238077.jpg +Places365_test_00238078.jpg +Places365_test_00238099.jpg +Places365_test_00238101.jpg +Places365_test_00238109.jpg +Places365_test_00238111.jpg +Places365_test_00238133.jpg +Places365_test_00238147.jpg +Places365_test_00238148.jpg +Places365_test_00238168.jpg +Places365_test_00238178.jpg +Places365_test_00238188.jpg +Places365_test_00238189.jpg +Places365_test_00238194.jpg +Places365_test_00238250.jpg +Places365_test_00238259.jpg +Places365_test_00238268.jpg +Places365_test_00238273.jpg +Places365_test_00238313.jpg +Places365_test_00238325.jpg +Places365_test_00238349.jpg +Places365_test_00238350.jpg +Places365_test_00238355.jpg +Places365_test_00238360.jpg +Places365_test_00238381.jpg 
+Places365_test_00238393.jpg +Places365_test_00238397.jpg +Places365_test_00238404.jpg +Places365_test_00238407.jpg +Places365_test_00238408.jpg +Places365_test_00238441.jpg +Places365_test_00238442.jpg +Places365_test_00238487.jpg +Places365_test_00238498.jpg +Places365_test_00238519.jpg +Places365_test_00238548.jpg +Places365_test_00238550.jpg +Places365_test_00238555.jpg +Places365_test_00238583.jpg +Places365_test_00238591.jpg +Places365_test_00238592.jpg +Places365_test_00238593.jpg +Places365_test_00238602.jpg +Places365_test_00238608.jpg +Places365_test_00238611.jpg +Places365_test_00238629.jpg +Places365_test_00238637.jpg +Places365_test_00238640.jpg +Places365_test_00238652.jpg +Places365_test_00238678.jpg +Places365_test_00238683.jpg +Places365_test_00238696.jpg +Places365_test_00238712.jpg +Places365_test_00238720.jpg +Places365_test_00238726.jpg +Places365_test_00238727.jpg +Places365_test_00238731.jpg +Places365_test_00238742.jpg +Places365_test_00238750.jpg +Places365_test_00238751.jpg +Places365_test_00238752.jpg +Places365_test_00238757.jpg +Places365_test_00238767.jpg +Places365_test_00238782.jpg +Places365_test_00238806.jpg +Places365_test_00238812.jpg +Places365_test_00238814.jpg +Places365_test_00238821.jpg +Places365_test_00238830.jpg +Places365_test_00238847.jpg +Places365_test_00238863.jpg +Places365_test_00238879.jpg +Places365_test_00238910.jpg +Places365_test_00238917.jpg +Places365_test_00238922.jpg +Places365_test_00238927.jpg +Places365_test_00238929.jpg +Places365_test_00238939.jpg +Places365_test_00238951.jpg +Places365_test_00238956.jpg +Places365_test_00238973.jpg +Places365_test_00238974.jpg +Places365_test_00238983.jpg +Places365_test_00238996.jpg +Places365_test_00239008.jpg +Places365_test_00239011.jpg +Places365_test_00239018.jpg +Places365_test_00239033.jpg +Places365_test_00239074.jpg +Places365_test_00239079.jpg +Places365_test_00239080.jpg +Places365_test_00239093.jpg +Places365_test_00239094.jpg +Places365_test_00239120.jpg +Places365_test_00239132.jpg +Places365_test_00239136.jpg +Places365_test_00239147.jpg +Places365_test_00239152.jpg +Places365_test_00239155.jpg +Places365_test_00239163.jpg +Places365_test_00239168.jpg +Places365_test_00239170.jpg +Places365_test_00239174.jpg +Places365_test_00239194.jpg +Places365_test_00239199.jpg +Places365_test_00239214.jpg +Places365_test_00239237.jpg +Places365_test_00239246.jpg +Places365_test_00239255.jpg +Places365_test_00239274.jpg +Places365_test_00239280.jpg +Places365_test_00239285.jpg +Places365_test_00239290.jpg +Places365_test_00239310.jpg +Places365_test_00239315.jpg +Places365_test_00239318.jpg +Places365_test_00239340.jpg +Places365_test_00239349.jpg +Places365_test_00239360.jpg +Places365_test_00239364.jpg +Places365_test_00239366.jpg +Places365_test_00239380.jpg +Places365_test_00239391.jpg +Places365_test_00239406.jpg +Places365_test_00239413.jpg +Places365_test_00239425.jpg +Places365_test_00239427.jpg +Places365_test_00239430.jpg +Places365_test_00239440.jpg +Places365_test_00239461.jpg +Places365_test_00239473.jpg +Places365_test_00239502.jpg +Places365_test_00239534.jpg +Places365_test_00239557.jpg +Places365_test_00239564.jpg +Places365_test_00239608.jpg +Places365_test_00239635.jpg +Places365_test_00239636.jpg +Places365_test_00239643.jpg +Places365_test_00239668.jpg +Places365_test_00239680.jpg +Places365_test_00239719.jpg +Places365_test_00239731.jpg +Places365_test_00239742.jpg +Places365_test_00239753.jpg +Places365_test_00239761.jpg +Places365_test_00239774.jpg 
+Places365_test_00239786.jpg +Places365_test_00239805.jpg +Places365_test_00239814.jpg +Places365_test_00239820.jpg +Places365_test_00239850.jpg +Places365_test_00239876.jpg +Places365_test_00239878.jpg +Places365_test_00239881.jpg +Places365_test_00239886.jpg +Places365_test_00239887.jpg +Places365_test_00239902.jpg +Places365_test_00239914.jpg +Places365_test_00239928.jpg +Places365_test_00239941.jpg +Places365_test_00239943.jpg +Places365_test_00239951.jpg +Places365_test_00239954.jpg +Places365_test_00239963.jpg +Places365_test_00239991.jpg +Places365_test_00239995.jpg +Places365_test_00240030.jpg +Places365_test_00240035.jpg +Places365_test_00240038.jpg +Places365_test_00240051.jpg +Places365_test_00240056.jpg +Places365_test_00240060.jpg +Places365_test_00240090.jpg +Places365_test_00240102.jpg +Places365_test_00240112.jpg +Places365_test_00240114.jpg +Places365_test_00240132.jpg +Places365_test_00240135.jpg +Places365_test_00240138.jpg +Places365_test_00240141.jpg +Places365_test_00240161.jpg +Places365_test_00240170.jpg +Places365_test_00240178.jpg +Places365_test_00240179.jpg +Places365_test_00240188.jpg +Places365_test_00240202.jpg +Places365_test_00240214.jpg +Places365_test_00240230.jpg +Places365_test_00240237.jpg +Places365_test_00240255.jpg +Places365_test_00240290.jpg +Places365_test_00240312.jpg +Places365_test_00240320.jpg +Places365_test_00240327.jpg +Places365_test_00240331.jpg +Places365_test_00240356.jpg +Places365_test_00240357.jpg +Places365_test_00240359.jpg +Places365_test_00240373.jpg +Places365_test_00240374.jpg +Places365_test_00240378.jpg +Places365_test_00240385.jpg +Places365_test_00240404.jpg +Places365_test_00240410.jpg +Places365_test_00240433.jpg +Places365_test_00240438.jpg +Places365_test_00240448.jpg +Places365_test_00240469.jpg +Places365_test_00240472.jpg +Places365_test_00240479.jpg +Places365_test_00240493.jpg +Places365_test_00240494.jpg +Places365_test_00240495.jpg +Places365_test_00240513.jpg +Places365_test_00240515.jpg +Places365_test_00240517.jpg +Places365_test_00240524.jpg +Places365_test_00240528.jpg +Places365_test_00240537.jpg +Places365_test_00240540.jpg +Places365_test_00240551.jpg +Places365_test_00240552.jpg +Places365_test_00240554.jpg +Places365_test_00240578.jpg +Places365_test_00240613.jpg +Places365_test_00240616.jpg +Places365_test_00240672.jpg +Places365_test_00240677.jpg +Places365_test_00240678.jpg +Places365_test_00240688.jpg +Places365_test_00240695.jpg +Places365_test_00240707.jpg +Places365_test_00240708.jpg +Places365_test_00240727.jpg +Places365_test_00240732.jpg +Places365_test_00240738.jpg +Places365_test_00240755.jpg +Places365_test_00240758.jpg +Places365_test_00240762.jpg +Places365_test_00240822.jpg +Places365_test_00240828.jpg +Places365_test_00240838.jpg +Places365_test_00240839.jpg +Places365_test_00240847.jpg +Places365_test_00240849.jpg +Places365_test_00240873.jpg +Places365_test_00240886.jpg +Places365_test_00240895.jpg +Places365_test_00240900.jpg +Places365_test_00240902.jpg +Places365_test_00240910.jpg +Places365_test_00240922.jpg +Places365_test_00240933.jpg +Places365_test_00240934.jpg +Places365_test_00240944.jpg +Places365_test_00240949.jpg +Places365_test_00240950.jpg +Places365_test_00240953.jpg +Places365_test_00240958.jpg +Places365_test_00240960.jpg +Places365_test_00240961.jpg +Places365_test_00240971.jpg +Places365_test_00240985.jpg +Places365_test_00240987.jpg +Places365_test_00240988.jpg +Places365_test_00240992.jpg +Places365_test_00241022.jpg +Places365_test_00241028.jpg 
+Places365_test_00241039.jpg +Places365_test_00241048.jpg +Places365_test_00241087.jpg +Places365_test_00241095.jpg +Places365_test_00241096.jpg +Places365_test_00241102.jpg +Places365_test_00241104.jpg +Places365_test_00241105.jpg +Places365_test_00241113.jpg +Places365_test_00241135.jpg +Places365_test_00241138.jpg +Places365_test_00241164.jpg +Places365_test_00241168.jpg +Places365_test_00241176.jpg +Places365_test_00241187.jpg +Places365_test_00241200.jpg +Places365_test_00241213.jpg +Places365_test_00241219.jpg +Places365_test_00241221.jpg +Places365_test_00241232.jpg +Places365_test_00241241.jpg +Places365_test_00241244.jpg +Places365_test_00241246.jpg +Places365_test_00241248.jpg +Places365_test_00241260.jpg +Places365_test_00241276.jpg +Places365_test_00241284.jpg +Places365_test_00241286.jpg +Places365_test_00241289.jpg +Places365_test_00241318.jpg +Places365_test_00241346.jpg +Places365_test_00241348.jpg +Places365_test_00241350.jpg +Places365_test_00241353.jpg +Places365_test_00241394.jpg +Places365_test_00241395.jpg +Places365_test_00241405.jpg +Places365_test_00241437.jpg +Places365_test_00241438.jpg +Places365_test_00241454.jpg +Places365_test_00241455.jpg +Places365_test_00241471.jpg +Places365_test_00241476.jpg +Places365_test_00241506.jpg +Places365_test_00241520.jpg +Places365_test_00241532.jpg +Places365_test_00241534.jpg +Places365_test_00241561.jpg +Places365_test_00241562.jpg +Places365_test_00241578.jpg +Places365_test_00241580.jpg +Places365_test_00241598.jpg +Places365_test_00241599.jpg +Places365_test_00241613.jpg +Places365_test_00241620.jpg +Places365_test_00241634.jpg +Places365_test_00241683.jpg +Places365_test_00241713.jpg +Places365_test_00241721.jpg +Places365_test_00241737.jpg +Places365_test_00241741.jpg +Places365_test_00241761.jpg +Places365_test_00241766.jpg +Places365_test_00241769.jpg +Places365_test_00241771.jpg +Places365_test_00241778.jpg +Places365_test_00241783.jpg +Places365_test_00241794.jpg +Places365_test_00241799.jpg +Places365_test_00241808.jpg +Places365_test_00241810.jpg +Places365_test_00241827.jpg +Places365_test_00241845.jpg +Places365_test_00241851.jpg +Places365_test_00241852.jpg +Places365_test_00241882.jpg +Places365_test_00241896.jpg +Places365_test_00241907.jpg +Places365_test_00241920.jpg +Places365_test_00241921.jpg +Places365_test_00241940.jpg +Places365_test_00241959.jpg +Places365_test_00241960.jpg +Places365_test_00241974.jpg +Places365_test_00241996.jpg +Places365_test_00242006.jpg +Places365_test_00242013.jpg +Places365_test_00242018.jpg +Places365_test_00242030.jpg +Places365_test_00242033.jpg +Places365_test_00242051.jpg +Places365_test_00242054.jpg +Places365_test_00242061.jpg +Places365_test_00242067.jpg +Places365_test_00242068.jpg +Places365_test_00242079.jpg +Places365_test_00242106.jpg +Places365_test_00242109.jpg +Places365_test_00242134.jpg +Places365_test_00242157.jpg +Places365_test_00242162.jpg +Places365_test_00242169.jpg +Places365_test_00242170.jpg +Places365_test_00242176.jpg +Places365_test_00242185.jpg +Places365_test_00242188.jpg +Places365_test_00242191.jpg +Places365_test_00242199.jpg +Places365_test_00242202.jpg +Places365_test_00242221.jpg +Places365_test_00242227.jpg +Places365_test_00242235.jpg +Places365_test_00242247.jpg +Places365_test_00242256.jpg +Places365_test_00242276.jpg +Places365_test_00242281.jpg +Places365_test_00242293.jpg +Places365_test_00242295.jpg +Places365_test_00242301.jpg +Places365_test_00242307.jpg +Places365_test_00242320.jpg +Places365_test_00242329.jpg 
+Places365_test_00242333.jpg +Places365_test_00242335.jpg +Places365_test_00242336.jpg +Places365_test_00242345.jpg +Places365_test_00242361.jpg +Places365_test_00242362.jpg +Places365_test_00242370.jpg +Places365_test_00242376.jpg +Places365_test_00242381.jpg +Places365_test_00242388.jpg +Places365_test_00242404.jpg +Places365_test_00242410.jpg +Places365_test_00242414.jpg +Places365_test_00242433.jpg +Places365_test_00242453.jpg +Places365_test_00242459.jpg +Places365_test_00242472.jpg +Places365_test_00242487.jpg +Places365_test_00242488.jpg +Places365_test_00242495.jpg +Places365_test_00242506.jpg +Places365_test_00242512.jpg +Places365_test_00242513.jpg +Places365_test_00242514.jpg +Places365_test_00242524.jpg +Places365_test_00242528.jpg +Places365_test_00242533.jpg +Places365_test_00242536.jpg +Places365_test_00242539.jpg +Places365_test_00242545.jpg +Places365_test_00242562.jpg +Places365_test_00242593.jpg +Places365_test_00242605.jpg +Places365_test_00242622.jpg +Places365_test_00242630.jpg +Places365_test_00242645.jpg +Places365_test_00242656.jpg +Places365_test_00242660.jpg +Places365_test_00242678.jpg +Places365_test_00242682.jpg +Places365_test_00242690.jpg +Places365_test_00242699.jpg +Places365_test_00242705.jpg +Places365_test_00242729.jpg +Places365_test_00242745.jpg +Places365_test_00242750.jpg +Places365_test_00242765.jpg +Places365_test_00242780.jpg +Places365_test_00242787.jpg +Places365_test_00242804.jpg +Places365_test_00242805.jpg +Places365_test_00242815.jpg +Places365_test_00242820.jpg +Places365_test_00242836.jpg +Places365_test_00242854.jpg +Places365_test_00242869.jpg +Places365_test_00242875.jpg +Places365_test_00242927.jpg +Places365_test_00242944.jpg +Places365_test_00242952.jpg +Places365_test_00242953.jpg +Places365_test_00242957.jpg +Places365_test_00242962.jpg +Places365_test_00242972.jpg +Places365_test_00242991.jpg +Places365_test_00243005.jpg +Places365_test_00243035.jpg +Places365_test_00243037.jpg +Places365_test_00243039.jpg +Places365_test_00243052.jpg +Places365_test_00243076.jpg +Places365_test_00243080.jpg +Places365_test_00243081.jpg +Places365_test_00243088.jpg +Places365_test_00243089.jpg +Places365_test_00243092.jpg +Places365_test_00243102.jpg +Places365_test_00243106.jpg +Places365_test_00243108.jpg +Places365_test_00243135.jpg +Places365_test_00243141.jpg +Places365_test_00243191.jpg +Places365_test_00243199.jpg +Places365_test_00243202.jpg +Places365_test_00243204.jpg +Places365_test_00243219.jpg +Places365_test_00243224.jpg +Places365_test_00243232.jpg +Places365_test_00243234.jpg +Places365_test_00243248.jpg +Places365_test_00243257.jpg +Places365_test_00243263.jpg +Places365_test_00243307.jpg +Places365_test_00243311.jpg +Places365_test_00243324.jpg +Places365_test_00243349.jpg +Places365_test_00243353.jpg +Places365_test_00243375.jpg +Places365_test_00243413.jpg +Places365_test_00243419.jpg +Places365_test_00243423.jpg +Places365_test_00243427.jpg +Places365_test_00243437.jpg +Places365_test_00243456.jpg +Places365_test_00243457.jpg +Places365_test_00243459.jpg +Places365_test_00243485.jpg +Places365_test_00243493.jpg +Places365_test_00243501.jpg +Places365_test_00243512.jpg +Places365_test_00243523.jpg +Places365_test_00243528.jpg +Places365_test_00243569.jpg +Places365_test_00243574.jpg +Places365_test_00243576.jpg +Places365_test_00243593.jpg +Places365_test_00243596.jpg +Places365_test_00243608.jpg +Places365_test_00243611.jpg +Places365_test_00243612.jpg +Places365_test_00243620.jpg +Places365_test_00243626.jpg 
+Places365_test_00243632.jpg +Places365_test_00243643.jpg +Places365_test_00243644.jpg +Places365_test_00243663.jpg +Places365_test_00243669.jpg +Places365_test_00243670.jpg +Places365_test_00243673.jpg +Places365_test_00243678.jpg +Places365_test_00243692.jpg +Places365_test_00243693.jpg +Places365_test_00243697.jpg +Places365_test_00243702.jpg +Places365_test_00243710.jpg +Places365_test_00243717.jpg +Places365_test_00243732.jpg +Places365_test_00243744.jpg +Places365_test_00243769.jpg +Places365_test_00243796.jpg +Places365_test_00243800.jpg +Places365_test_00243811.jpg +Places365_test_00243819.jpg +Places365_test_00243825.jpg +Places365_test_00243830.jpg +Places365_test_00243832.jpg +Places365_test_00243849.jpg +Places365_test_00243851.jpg +Places365_test_00243855.jpg +Places365_test_00243857.jpg +Places365_test_00243861.jpg +Places365_test_00243875.jpg +Places365_test_00243884.jpg +Places365_test_00243918.jpg +Places365_test_00243956.jpg +Places365_test_00243973.jpg +Places365_test_00243988.jpg +Places365_test_00243997.jpg +Places365_test_00244011.jpg +Places365_test_00244022.jpg +Places365_test_00244042.jpg +Places365_test_00244050.jpg +Places365_test_00244059.jpg +Places365_test_00244064.jpg +Places365_test_00244085.jpg +Places365_test_00244122.jpg +Places365_test_00244143.jpg +Places365_test_00244151.jpg +Places365_test_00244181.jpg +Places365_test_00244187.jpg +Places365_test_00244190.jpg +Places365_test_00244194.jpg +Places365_test_00244199.jpg +Places365_test_00244202.jpg +Places365_test_00244207.jpg +Places365_test_00244210.jpg +Places365_test_00244223.jpg +Places365_test_00244227.jpg +Places365_test_00244228.jpg +Places365_test_00244231.jpg +Places365_test_00244234.jpg +Places365_test_00244266.jpg +Places365_test_00244272.jpg +Places365_test_00244278.jpg +Places365_test_00244281.jpg +Places365_test_00244284.jpg +Places365_test_00244300.jpg +Places365_test_00244316.jpg +Places365_test_00244318.jpg +Places365_test_00244337.jpg +Places365_test_00244338.jpg +Places365_test_00244340.jpg +Places365_test_00244341.jpg +Places365_test_00244347.jpg +Places365_test_00244360.jpg +Places365_test_00244361.jpg +Places365_test_00244395.jpg +Places365_test_00244411.jpg +Places365_test_00244462.jpg +Places365_test_00244467.jpg +Places365_test_00244497.jpg +Places365_test_00244514.jpg +Places365_test_00244517.jpg +Places365_test_00244527.jpg +Places365_test_00244536.jpg +Places365_test_00244538.jpg +Places365_test_00244555.jpg +Places365_test_00244560.jpg +Places365_test_00244564.jpg +Places365_test_00244569.jpg +Places365_test_00244579.jpg +Places365_test_00244586.jpg +Places365_test_00244606.jpg +Places365_test_00244618.jpg +Places365_test_00244645.jpg +Places365_test_00244649.jpg +Places365_test_00244651.jpg +Places365_test_00244657.jpg +Places365_test_00244660.jpg +Places365_test_00244663.jpg +Places365_test_00244671.jpg +Places365_test_00244696.jpg +Places365_test_00244702.jpg +Places365_test_00244710.jpg +Places365_test_00244711.jpg +Places365_test_00244712.jpg +Places365_test_00244717.jpg +Places365_test_00244718.jpg +Places365_test_00244768.jpg +Places365_test_00244773.jpg +Places365_test_00244782.jpg +Places365_test_00244791.jpg +Places365_test_00244808.jpg +Places365_test_00244821.jpg +Places365_test_00244822.jpg +Places365_test_00244854.jpg +Places365_test_00244867.jpg +Places365_test_00244876.jpg +Places365_test_00244878.jpg +Places365_test_00244903.jpg +Places365_test_00244914.jpg +Places365_test_00244949.jpg +Places365_test_00244951.jpg +Places365_test_00244952.jpg 
+Places365_test_00244956.jpg +Places365_test_00244958.jpg +Places365_test_00244976.jpg +Places365_test_00244977.jpg +Places365_test_00244985.jpg +Places365_test_00244993.jpg +Places365_test_00245017.jpg +Places365_test_00245027.jpg +Places365_test_00245033.jpg +Places365_test_00245071.jpg +Places365_test_00245077.jpg +Places365_test_00245082.jpg +Places365_test_00245097.jpg +Places365_test_00245098.jpg +Places365_test_00245112.jpg +Places365_test_00245117.jpg +Places365_test_00245118.jpg +Places365_test_00245145.jpg +Places365_test_00245150.jpg +Places365_test_00245152.jpg +Places365_test_00245189.jpg +Places365_test_00245197.jpg +Places365_test_00245220.jpg +Places365_test_00245240.jpg +Places365_test_00245242.jpg +Places365_test_00245264.jpg +Places365_test_00245267.jpg +Places365_test_00245283.jpg +Places365_test_00245308.jpg +Places365_test_00245313.jpg +Places365_test_00245316.jpg +Places365_test_00245346.jpg +Places365_test_00245353.jpg +Places365_test_00245354.jpg +Places365_test_00245378.jpg +Places365_test_00245404.jpg +Places365_test_00245414.jpg +Places365_test_00245415.jpg +Places365_test_00245424.jpg +Places365_test_00245433.jpg +Places365_test_00245439.jpg +Places365_test_00245460.jpg +Places365_test_00245481.jpg +Places365_test_00245513.jpg +Places365_test_00245530.jpg +Places365_test_00245533.jpg +Places365_test_00245534.jpg +Places365_test_00245548.jpg +Places365_test_00245555.jpg +Places365_test_00245560.jpg +Places365_test_00245568.jpg +Places365_test_00245576.jpg +Places365_test_00245583.jpg +Places365_test_00245599.jpg +Places365_test_00245610.jpg +Places365_test_00245614.jpg +Places365_test_00245646.jpg +Places365_test_00245654.jpg +Places365_test_00245660.jpg +Places365_test_00245677.jpg +Places365_test_00245687.jpg +Places365_test_00245691.jpg +Places365_test_00245705.jpg +Places365_test_00245708.jpg +Places365_test_00245730.jpg +Places365_test_00245757.jpg +Places365_test_00245766.jpg +Places365_test_00245805.jpg +Places365_test_00245809.jpg +Places365_test_00245828.jpg +Places365_test_00245834.jpg +Places365_test_00245840.jpg +Places365_test_00245846.jpg +Places365_test_00245856.jpg +Places365_test_00245857.jpg +Places365_test_00245859.jpg +Places365_test_00245860.jpg +Places365_test_00245871.jpg +Places365_test_00245902.jpg +Places365_test_00245907.jpg +Places365_test_00245918.jpg +Places365_test_00245934.jpg +Places365_test_00245949.jpg +Places365_test_00245950.jpg +Places365_test_00245953.jpg +Places365_test_00245963.jpg +Places365_test_00245984.jpg +Places365_test_00245985.jpg +Places365_test_00245992.jpg +Places365_test_00245994.jpg +Places365_test_00246025.jpg +Places365_test_00246034.jpg +Places365_test_00246039.jpg +Places365_test_00246060.jpg +Places365_test_00246089.jpg +Places365_test_00246093.jpg +Places365_test_00246112.jpg +Places365_test_00246114.jpg +Places365_test_00246131.jpg +Places365_test_00246136.jpg +Places365_test_00246182.jpg +Places365_test_00246183.jpg +Places365_test_00246185.jpg +Places365_test_00246188.jpg +Places365_test_00246209.jpg +Places365_test_00246223.jpg +Places365_test_00246224.jpg +Places365_test_00246227.jpg +Places365_test_00246239.jpg +Places365_test_00246263.jpg +Places365_test_00246277.jpg +Places365_test_00246302.jpg +Places365_test_00246304.jpg +Places365_test_00246320.jpg +Places365_test_00246321.jpg +Places365_test_00246325.jpg +Places365_test_00246328.jpg +Places365_test_00246329.jpg +Places365_test_00246336.jpg +Places365_test_00246342.jpg +Places365_test_00246345.jpg +Places365_test_00246358.jpg 
+Places365_test_00246362.jpg +Places365_test_00246365.jpg +Places365_test_00246373.jpg +Places365_test_00246386.jpg +Places365_test_00246390.jpg +Places365_test_00246394.jpg +Places365_test_00246421.jpg +Places365_test_00246430.jpg +Places365_test_00246431.jpg +Places365_test_00246446.jpg +Places365_test_00246458.jpg +Places365_test_00246462.jpg +Places365_test_00246467.jpg +Places365_test_00246468.jpg +Places365_test_00246470.jpg +Places365_test_00246494.jpg +Places365_test_00246500.jpg +Places365_test_00246510.jpg +Places365_test_00246515.jpg +Places365_test_00246518.jpg +Places365_test_00246520.jpg +Places365_test_00246521.jpg +Places365_test_00246544.jpg +Places365_test_00246561.jpg +Places365_test_00246575.jpg +Places365_test_00246579.jpg +Places365_test_00246590.jpg +Places365_test_00246597.jpg +Places365_test_00246607.jpg +Places365_test_00246609.jpg +Places365_test_00246611.jpg +Places365_test_00246620.jpg +Places365_test_00246623.jpg +Places365_test_00246634.jpg +Places365_test_00246650.jpg +Places365_test_00246659.jpg +Places365_test_00246662.jpg +Places365_test_00246674.jpg +Places365_test_00246677.jpg +Places365_test_00246722.jpg +Places365_test_00246725.jpg +Places365_test_00246729.jpg +Places365_test_00246742.jpg +Places365_test_00246761.jpg +Places365_test_00246781.jpg +Places365_test_00246803.jpg +Places365_test_00246811.jpg +Places365_test_00246823.jpg +Places365_test_00246836.jpg +Places365_test_00246843.jpg +Places365_test_00246849.jpg +Places365_test_00246864.jpg +Places365_test_00246873.jpg +Places365_test_00246875.jpg +Places365_test_00246876.jpg +Places365_test_00246889.jpg +Places365_test_00246901.jpg +Places365_test_00246916.jpg +Places365_test_00246920.jpg +Places365_test_00246925.jpg +Places365_test_00246939.jpg +Places365_test_00246944.jpg +Places365_test_00246945.jpg +Places365_test_00246958.jpg +Places365_test_00246963.jpg +Places365_test_00246965.jpg +Places365_test_00246980.jpg +Places365_test_00247013.jpg +Places365_test_00247033.jpg +Places365_test_00247047.jpg +Places365_test_00247048.jpg +Places365_test_00247056.jpg +Places365_test_00247063.jpg +Places365_test_00247072.jpg +Places365_test_00247080.jpg +Places365_test_00247090.jpg +Places365_test_00247102.jpg +Places365_test_00247103.jpg +Places365_test_00247108.jpg +Places365_test_00247115.jpg +Places365_test_00247139.jpg +Places365_test_00247158.jpg +Places365_test_00247169.jpg +Places365_test_00247170.jpg +Places365_test_00247175.jpg +Places365_test_00247179.jpg +Places365_test_00247183.jpg +Places365_test_00247206.jpg +Places365_test_00247210.jpg +Places365_test_00247211.jpg +Places365_test_00247214.jpg +Places365_test_00247220.jpg +Places365_test_00247227.jpg +Places365_test_00247233.jpg +Places365_test_00247239.jpg +Places365_test_00247241.jpg +Places365_test_00247247.jpg +Places365_test_00247269.jpg +Places365_test_00247288.jpg +Places365_test_00247308.jpg +Places365_test_00247328.jpg +Places365_test_00247342.jpg +Places365_test_00247344.jpg +Places365_test_00247347.jpg +Places365_test_00247350.jpg +Places365_test_00247352.jpg +Places365_test_00247360.jpg +Places365_test_00247365.jpg +Places365_test_00247367.jpg +Places365_test_00247374.jpg +Places365_test_00247379.jpg +Places365_test_00247391.jpg +Places365_test_00247404.jpg +Places365_test_00247417.jpg +Places365_test_00247419.jpg +Places365_test_00247420.jpg +Places365_test_00247422.jpg +Places365_test_00247423.jpg +Places365_test_00247426.jpg +Places365_test_00247436.jpg +Places365_test_00247465.jpg +Places365_test_00247471.jpg 
+Places365_test_00247487.jpg +Places365_test_00247495.jpg +Places365_test_00247500.jpg +Places365_test_00247516.jpg +Places365_test_00247521.jpg +Places365_test_00247549.jpg +Places365_test_00247557.jpg +Places365_test_00247558.jpg +Places365_test_00247564.jpg +Places365_test_00247569.jpg +Places365_test_00247577.jpg +Places365_test_00247580.jpg +Places365_test_00247612.jpg +Places365_test_00247618.jpg +Places365_test_00247621.jpg +Places365_test_00247634.jpg +Places365_test_00247642.jpg +Places365_test_00247651.jpg +Places365_test_00247655.jpg +Places365_test_00247661.jpg +Places365_test_00247667.jpg +Places365_test_00247671.jpg +Places365_test_00247679.jpg +Places365_test_00247685.jpg +Places365_test_00247691.jpg +Places365_test_00247692.jpg +Places365_test_00247695.jpg +Places365_test_00247696.jpg +Places365_test_00247726.jpg +Places365_test_00247731.jpg +Places365_test_00247739.jpg +Places365_test_00247743.jpg +Places365_test_00247750.jpg +Places365_test_00247753.jpg +Places365_test_00247777.jpg +Places365_test_00247783.jpg +Places365_test_00247786.jpg +Places365_test_00247811.jpg +Places365_test_00247825.jpg +Places365_test_00247827.jpg +Places365_test_00247834.jpg +Places365_test_00247836.jpg +Places365_test_00247847.jpg +Places365_test_00247849.jpg +Places365_test_00247857.jpg +Places365_test_00247864.jpg +Places365_test_00247882.jpg +Places365_test_00247887.jpg +Places365_test_00247888.jpg +Places365_test_00247889.jpg +Places365_test_00247904.jpg +Places365_test_00247911.jpg +Places365_test_00247925.jpg +Places365_test_00247957.jpg +Places365_test_00247963.jpg +Places365_test_00247973.jpg +Places365_test_00247976.jpg +Places365_test_00247977.jpg +Places365_test_00247986.jpg +Places365_test_00248001.jpg +Places365_test_00248013.jpg +Places365_test_00248014.jpg +Places365_test_00248015.jpg +Places365_test_00248023.jpg +Places365_test_00248038.jpg +Places365_test_00248043.jpg +Places365_test_00248047.jpg +Places365_test_00248067.jpg +Places365_test_00248081.jpg +Places365_test_00248113.jpg +Places365_test_00248159.jpg +Places365_test_00248163.jpg +Places365_test_00248166.jpg +Places365_test_00248177.jpg +Places365_test_00248193.jpg +Places365_test_00248220.jpg +Places365_test_00248243.jpg +Places365_test_00248244.jpg +Places365_test_00248246.jpg +Places365_test_00248247.jpg +Places365_test_00248249.jpg +Places365_test_00248260.jpg +Places365_test_00248278.jpg +Places365_test_00248300.jpg +Places365_test_00248316.jpg +Places365_test_00248333.jpg +Places365_test_00248343.jpg +Places365_test_00248355.jpg +Places365_test_00248370.jpg +Places365_test_00248408.jpg +Places365_test_00248415.jpg +Places365_test_00248418.jpg +Places365_test_00248427.jpg +Places365_test_00248428.jpg +Places365_test_00248431.jpg +Places365_test_00248447.jpg +Places365_test_00248458.jpg +Places365_test_00248464.jpg +Places365_test_00248465.jpg +Places365_test_00248467.jpg +Places365_test_00248482.jpg +Places365_test_00248491.jpg +Places365_test_00248494.jpg +Places365_test_00248527.jpg +Places365_test_00248539.jpg +Places365_test_00248543.jpg +Places365_test_00248573.jpg +Places365_test_00248607.jpg +Places365_test_00248624.jpg +Places365_test_00248626.jpg +Places365_test_00248628.jpg +Places365_test_00248629.jpg +Places365_test_00248648.jpg +Places365_test_00248655.jpg +Places365_test_00248673.jpg +Places365_test_00248679.jpg +Places365_test_00248680.jpg +Places365_test_00248698.jpg +Places365_test_00248703.jpg +Places365_test_00248717.jpg +Places365_test_00248731.jpg +Places365_test_00248767.jpg 
+Places365_test_00248792.jpg +Places365_test_00248796.jpg +Places365_test_00248806.jpg +Places365_test_00248831.jpg +Places365_test_00248835.jpg +Places365_test_00248841.jpg +Places365_test_00248855.jpg +Places365_test_00248862.jpg +Places365_test_00248868.jpg +Places365_test_00248879.jpg +Places365_test_00248886.jpg +Places365_test_00248898.jpg +Places365_test_00248899.jpg +Places365_test_00248902.jpg +Places365_test_00248904.jpg +Places365_test_00248919.jpg +Places365_test_00248941.jpg +Places365_test_00248944.jpg +Places365_test_00248956.jpg +Places365_test_00248960.jpg +Places365_test_00248966.jpg +Places365_test_00248983.jpg +Places365_test_00248985.jpg +Places365_test_00248988.jpg +Places365_test_00248999.jpg +Places365_test_00249001.jpg +Places365_test_00249004.jpg +Places365_test_00249014.jpg +Places365_test_00249037.jpg +Places365_test_00249043.jpg +Places365_test_00249051.jpg +Places365_test_00249059.jpg +Places365_test_00249067.jpg +Places365_test_00249084.jpg +Places365_test_00249102.jpg +Places365_test_00249107.jpg +Places365_test_00249120.jpg +Places365_test_00249160.jpg +Places365_test_00249185.jpg +Places365_test_00249208.jpg +Places365_test_00249215.jpg +Places365_test_00249223.jpg +Places365_test_00249224.jpg +Places365_test_00249225.jpg +Places365_test_00249227.jpg +Places365_test_00249244.jpg +Places365_test_00249255.jpg +Places365_test_00249256.jpg +Places365_test_00249265.jpg +Places365_test_00249288.jpg +Places365_test_00249296.jpg +Places365_test_00249317.jpg +Places365_test_00249323.jpg +Places365_test_00249333.jpg +Places365_test_00249338.jpg +Places365_test_00249358.jpg +Places365_test_00249372.jpg +Places365_test_00249408.jpg +Places365_test_00249411.jpg +Places365_test_00249420.jpg +Places365_test_00249421.jpg +Places365_test_00249425.jpg +Places365_test_00249435.jpg +Places365_test_00249447.jpg +Places365_test_00249456.jpg +Places365_test_00249463.jpg +Places365_test_00249472.jpg +Places365_test_00249473.jpg +Places365_test_00249496.jpg +Places365_test_00249507.jpg +Places365_test_00249514.jpg +Places365_test_00249516.jpg +Places365_test_00249519.jpg +Places365_test_00249523.jpg +Places365_test_00249527.jpg +Places365_test_00249539.jpg +Places365_test_00249541.jpg +Places365_test_00249560.jpg +Places365_test_00249567.jpg +Places365_test_00249569.jpg +Places365_test_00249578.jpg +Places365_test_00249628.jpg +Places365_test_00249641.jpg +Places365_test_00249654.jpg +Places365_test_00249680.jpg +Places365_test_00249685.jpg +Places365_test_00249688.jpg +Places365_test_00249692.jpg +Places365_test_00249695.jpg +Places365_test_00249716.jpg +Places365_test_00249724.jpg +Places365_test_00249735.jpg +Places365_test_00249737.jpg +Places365_test_00249753.jpg +Places365_test_00249807.jpg +Places365_test_00249824.jpg +Places365_test_00249851.jpg +Places365_test_00249878.jpg +Places365_test_00249882.jpg +Places365_test_00249892.jpg +Places365_test_00249896.jpg +Places365_test_00249900.jpg +Places365_test_00249904.jpg +Places365_test_00249914.jpg +Places365_test_00249918.jpg +Places365_test_00249927.jpg +Places365_test_00249937.jpg +Places365_test_00249952.jpg +Places365_test_00249978.jpg +Places365_test_00249981.jpg +Places365_test_00249984.jpg +Places365_test_00249992.jpg +Places365_test_00249996.jpg +Places365_test_00250022.jpg +Places365_test_00250027.jpg +Places365_test_00250035.jpg +Places365_test_00250037.jpg +Places365_test_00250053.jpg +Places365_test_00250070.jpg +Places365_test_00250106.jpg +Places365_test_00250118.jpg +Places365_test_00250119.jpg 
+Places365_test_00250128.jpg +Places365_test_00250138.jpg +Places365_test_00250141.jpg +Places365_test_00250155.jpg +Places365_test_00250189.jpg +Places365_test_00250202.jpg +Places365_test_00250204.jpg +Places365_test_00250231.jpg +Places365_test_00250241.jpg +Places365_test_00250246.jpg +Places365_test_00250249.jpg +Places365_test_00250256.jpg +Places365_test_00250258.jpg +Places365_test_00250265.jpg +Places365_test_00250280.jpg +Places365_test_00250282.jpg +Places365_test_00250289.jpg +Places365_test_00250293.jpg +Places365_test_00250305.jpg +Places365_test_00250382.jpg +Places365_test_00250385.jpg +Places365_test_00250393.jpg +Places365_test_00250394.jpg +Places365_test_00250402.jpg +Places365_test_00250414.jpg +Places365_test_00250415.jpg +Places365_test_00250420.jpg +Places365_test_00250440.jpg +Places365_test_00250463.jpg +Places365_test_00250477.jpg +Places365_test_00250478.jpg +Places365_test_00250480.jpg +Places365_test_00250491.jpg +Places365_test_00250493.jpg +Places365_test_00250512.jpg +Places365_test_00250514.jpg +Places365_test_00250516.jpg +Places365_test_00250517.jpg +Places365_test_00250520.jpg +Places365_test_00250549.jpg +Places365_test_00250561.jpg +Places365_test_00250567.jpg +Places365_test_00250568.jpg +Places365_test_00250589.jpg +Places365_test_00250609.jpg +Places365_test_00250614.jpg +Places365_test_00250617.jpg +Places365_test_00250627.jpg +Places365_test_00250632.jpg +Places365_test_00250642.jpg +Places365_test_00250670.jpg +Places365_test_00250675.jpg +Places365_test_00250700.jpg +Places365_test_00250702.jpg +Places365_test_00250703.jpg +Places365_test_00250707.jpg +Places365_test_00250709.jpg +Places365_test_00250713.jpg +Places365_test_00250718.jpg +Places365_test_00250720.jpg +Places365_test_00250735.jpg +Places365_test_00250745.jpg +Places365_test_00250774.jpg +Places365_test_00250779.jpg +Places365_test_00250790.jpg +Places365_test_00250808.jpg +Places365_test_00250810.jpg +Places365_test_00250821.jpg +Places365_test_00250827.jpg +Places365_test_00250843.jpg +Places365_test_00250845.jpg +Places365_test_00250852.jpg +Places365_test_00250871.jpg +Places365_test_00250873.jpg +Places365_test_00250877.jpg +Places365_test_00250881.jpg +Places365_test_00250886.jpg +Places365_test_00250887.jpg +Places365_test_00250920.jpg +Places365_test_00250939.jpg +Places365_test_00250940.jpg +Places365_test_00250954.jpg +Places365_test_00250957.jpg +Places365_test_00250959.jpg +Places365_test_00250979.jpg +Places365_test_00250983.jpg +Places365_test_00250991.jpg +Places365_test_00250993.jpg +Places365_test_00250997.jpg +Places365_test_00251013.jpg +Places365_test_00251020.jpg +Places365_test_00251029.jpg +Places365_test_00251033.jpg +Places365_test_00251041.jpg +Places365_test_00251043.jpg +Places365_test_00251064.jpg +Places365_test_00251077.jpg +Places365_test_00251080.jpg +Places365_test_00251091.jpg +Places365_test_00251096.jpg +Places365_test_00251109.jpg +Places365_test_00251150.jpg +Places365_test_00251151.jpg +Places365_test_00251159.jpg +Places365_test_00251172.jpg +Places365_test_00251176.jpg +Places365_test_00251183.jpg +Places365_test_00251194.jpg +Places365_test_00251248.jpg +Places365_test_00251249.jpg +Places365_test_00251289.jpg +Places365_test_00251290.jpg +Places365_test_00251291.jpg +Places365_test_00251301.jpg +Places365_test_00251304.jpg +Places365_test_00251308.jpg +Places365_test_00251318.jpg +Places365_test_00251323.jpg +Places365_test_00251327.jpg +Places365_test_00251340.jpg +Places365_test_00251349.jpg +Places365_test_00251360.jpg 
+Places365_test_00251368.jpg +Places365_test_00251373.jpg +Places365_test_00251379.jpg +Places365_test_00251388.jpg +Places365_test_00251392.jpg +Places365_test_00251403.jpg +Places365_test_00251404.jpg +Places365_test_00251408.jpg +Places365_test_00251423.jpg +Places365_test_00251438.jpg +Places365_test_00251439.jpg +Places365_test_00251447.jpg +Places365_test_00251452.jpg +Places365_test_00251469.jpg +Places365_test_00251470.jpg +Places365_test_00251472.jpg +Places365_test_00251475.jpg +Places365_test_00251487.jpg +Places365_test_00251501.jpg +Places365_test_00251505.jpg +Places365_test_00251507.jpg +Places365_test_00251512.jpg +Places365_test_00251521.jpg +Places365_test_00251529.jpg +Places365_test_00251546.jpg +Places365_test_00251569.jpg +Places365_test_00251571.jpg +Places365_test_00251573.jpg +Places365_test_00251582.jpg +Places365_test_00251595.jpg +Places365_test_00251614.jpg +Places365_test_00251644.jpg +Places365_test_00251646.jpg +Places365_test_00251652.jpg +Places365_test_00251671.jpg +Places365_test_00251677.jpg +Places365_test_00251690.jpg +Places365_test_00251705.jpg +Places365_test_00251714.jpg +Places365_test_00251746.jpg +Places365_test_00251758.jpg +Places365_test_00251781.jpg +Places365_test_00251819.jpg +Places365_test_00251824.jpg +Places365_test_00251836.jpg +Places365_test_00251838.jpg +Places365_test_00251849.jpg +Places365_test_00251851.jpg +Places365_test_00251910.jpg +Places365_test_00251931.jpg +Places365_test_00251951.jpg +Places365_test_00251956.jpg +Places365_test_00251961.jpg +Places365_test_00251963.jpg +Places365_test_00251964.jpg +Places365_test_00251974.jpg +Places365_test_00251981.jpg +Places365_test_00251984.jpg +Places365_test_00251990.jpg +Places365_test_00251996.jpg +Places365_test_00252007.jpg +Places365_test_00252012.jpg +Places365_test_00252013.jpg +Places365_test_00252020.jpg +Places365_test_00252043.jpg +Places365_test_00252047.jpg +Places365_test_00252104.jpg +Places365_test_00252129.jpg +Places365_test_00252143.jpg +Places365_test_00252181.jpg +Places365_test_00252191.jpg +Places365_test_00252192.jpg +Places365_test_00252197.jpg +Places365_test_00252200.jpg +Places365_test_00252210.jpg +Places365_test_00252215.jpg +Places365_test_00252234.jpg +Places365_test_00252241.jpg +Places365_test_00252262.jpg +Places365_test_00252288.jpg +Places365_test_00252293.jpg +Places365_test_00252294.jpg +Places365_test_00252309.jpg +Places365_test_00252312.jpg +Places365_test_00252324.jpg +Places365_test_00252329.jpg +Places365_test_00252339.jpg +Places365_test_00252347.jpg +Places365_test_00252366.jpg +Places365_test_00252369.jpg +Places365_test_00252370.jpg +Places365_test_00252389.jpg +Places365_test_00252401.jpg +Places365_test_00252409.jpg +Places365_test_00252411.jpg +Places365_test_00252416.jpg +Places365_test_00252424.jpg +Places365_test_00252430.jpg +Places365_test_00252442.jpg +Places365_test_00252458.jpg +Places365_test_00252462.jpg +Places365_test_00252484.jpg +Places365_test_00252492.jpg +Places365_test_00252494.jpg +Places365_test_00252496.jpg +Places365_test_00252497.jpg +Places365_test_00252508.jpg +Places365_test_00252509.jpg +Places365_test_00252512.jpg +Places365_test_00252534.jpg +Places365_test_00252537.jpg +Places365_test_00252539.jpg +Places365_test_00252546.jpg +Places365_test_00252547.jpg +Places365_test_00252553.jpg +Places365_test_00252560.jpg +Places365_test_00252574.jpg +Places365_test_00252586.jpg +Places365_test_00252612.jpg +Places365_test_00252622.jpg +Places365_test_00252640.jpg +Places365_test_00252645.jpg 
+Places365_test_00252658.jpg +Places365_test_00252665.jpg +Places365_test_00252673.jpg +Places365_test_00252680.jpg +Places365_test_00252697.jpg +Places365_test_00252704.jpg +Places365_test_00252709.jpg +Places365_test_00252712.jpg +Places365_test_00252714.jpg +Places365_test_00252726.jpg +Places365_test_00252739.jpg +Places365_test_00252757.jpg +Places365_test_00252762.jpg +Places365_test_00252763.jpg +Places365_test_00252765.jpg +Places365_test_00252771.jpg +Places365_test_00252773.jpg +Places365_test_00252777.jpg +Places365_test_00252791.jpg +Places365_test_00252812.jpg +Places365_test_00252833.jpg +Places365_test_00252870.jpg +Places365_test_00252883.jpg +Places365_test_00252886.jpg +Places365_test_00252903.jpg +Places365_test_00252926.jpg +Places365_test_00252927.jpg +Places365_test_00252937.jpg +Places365_test_00252950.jpg +Places365_test_00252961.jpg +Places365_test_00252962.jpg +Places365_test_00252963.jpg +Places365_test_00252971.jpg +Places365_test_00252979.jpg +Places365_test_00252981.jpg +Places365_test_00252989.jpg +Places365_test_00252999.jpg +Places365_test_00253001.jpg +Places365_test_00253004.jpg +Places365_test_00253015.jpg +Places365_test_00253016.jpg +Places365_test_00253018.jpg +Places365_test_00253028.jpg +Places365_test_00253031.jpg +Places365_test_00253033.jpg +Places365_test_00253036.jpg +Places365_test_00253056.jpg +Places365_test_00253059.jpg +Places365_test_00253084.jpg +Places365_test_00253089.jpg +Places365_test_00253112.jpg +Places365_test_00253114.jpg +Places365_test_00253127.jpg +Places365_test_00253130.jpg +Places365_test_00253131.jpg +Places365_test_00253136.jpg +Places365_test_00253138.jpg +Places365_test_00253144.jpg +Places365_test_00253177.jpg +Places365_test_00253178.jpg +Places365_test_00253187.jpg +Places365_test_00253208.jpg +Places365_test_00253248.jpg +Places365_test_00253250.jpg +Places365_test_00253265.jpg +Places365_test_00253284.jpg +Places365_test_00253285.jpg +Places365_test_00253286.jpg +Places365_test_00253287.jpg +Places365_test_00253300.jpg +Places365_test_00253323.jpg +Places365_test_00253336.jpg +Places365_test_00253340.jpg +Places365_test_00253345.jpg +Places365_test_00253351.jpg +Places365_test_00253361.jpg +Places365_test_00253410.jpg +Places365_test_00253414.jpg +Places365_test_00253417.jpg +Places365_test_00253418.jpg +Places365_test_00253429.jpg +Places365_test_00253433.jpg +Places365_test_00253436.jpg +Places365_test_00253458.jpg +Places365_test_00253459.jpg +Places365_test_00253465.jpg +Places365_test_00253467.jpg +Places365_test_00253469.jpg +Places365_test_00253489.jpg +Places365_test_00253494.jpg +Places365_test_00253514.jpg +Places365_test_00253518.jpg +Places365_test_00253522.jpg +Places365_test_00253528.jpg +Places365_test_00253537.jpg +Places365_test_00253556.jpg +Places365_test_00253559.jpg +Places365_test_00253581.jpg +Places365_test_00253606.jpg +Places365_test_00253614.jpg +Places365_test_00253619.jpg +Places365_test_00253621.jpg +Places365_test_00253638.jpg +Places365_test_00253655.jpg +Places365_test_00253675.jpg +Places365_test_00253681.jpg +Places365_test_00253693.jpg +Places365_test_00253695.jpg +Places365_test_00253725.jpg +Places365_test_00253726.jpg +Places365_test_00253738.jpg +Places365_test_00253749.jpg +Places365_test_00253752.jpg +Places365_test_00253767.jpg +Places365_test_00253769.jpg +Places365_test_00253773.jpg +Places365_test_00253775.jpg +Places365_test_00253792.jpg +Places365_test_00253807.jpg +Places365_test_00253808.jpg +Places365_test_00253809.jpg +Places365_test_00253816.jpg 
+Places365_test_00253849.jpg +Places365_test_00253859.jpg +Places365_test_00253867.jpg +Places365_test_00253874.jpg +Places365_test_00253892.jpg +Places365_test_00253901.jpg +Places365_test_00253904.jpg +Places365_test_00253928.jpg +Places365_test_00253933.jpg +Places365_test_00253948.jpg +Places365_test_00253956.jpg +Places365_test_00253967.jpg +Places365_test_00253974.jpg +Places365_test_00253979.jpg +Places365_test_00254017.jpg +Places365_test_00254024.jpg +Places365_test_00254035.jpg +Places365_test_00254050.jpg +Places365_test_00254053.jpg +Places365_test_00254057.jpg +Places365_test_00254066.jpg +Places365_test_00254069.jpg +Places365_test_00254079.jpg +Places365_test_00254094.jpg +Places365_test_00254106.jpg +Places365_test_00254136.jpg +Places365_test_00254141.jpg +Places365_test_00254172.jpg +Places365_test_00254175.jpg +Places365_test_00254177.jpg +Places365_test_00254202.jpg +Places365_test_00254216.jpg +Places365_test_00254221.jpg +Places365_test_00254230.jpg +Places365_test_00254233.jpg +Places365_test_00254243.jpg +Places365_test_00254264.jpg +Places365_test_00254277.jpg +Places365_test_00254287.jpg +Places365_test_00254290.jpg +Places365_test_00254301.jpg +Places365_test_00254326.jpg +Places365_test_00254333.jpg +Places365_test_00254338.jpg +Places365_test_00254367.jpg +Places365_test_00254374.jpg +Places365_test_00254377.jpg +Places365_test_00254378.jpg +Places365_test_00254391.jpg +Places365_test_00254393.jpg +Places365_test_00254416.jpg +Places365_test_00254429.jpg +Places365_test_00254442.jpg +Places365_test_00254445.jpg +Places365_test_00254454.jpg +Places365_test_00254461.jpg +Places365_test_00254464.jpg +Places365_test_00254466.jpg +Places365_test_00254504.jpg +Places365_test_00254515.jpg +Places365_test_00254517.jpg +Places365_test_00254564.jpg +Places365_test_00254576.jpg +Places365_test_00254619.jpg +Places365_test_00254645.jpg +Places365_test_00254675.jpg +Places365_test_00254688.jpg +Places365_test_00254697.jpg +Places365_test_00254704.jpg +Places365_test_00254706.jpg +Places365_test_00254719.jpg +Places365_test_00254734.jpg +Places365_test_00254745.jpg +Places365_test_00254749.jpg +Places365_test_00254758.jpg +Places365_test_00254765.jpg +Places365_test_00254782.jpg +Places365_test_00254797.jpg +Places365_test_00254803.jpg +Places365_test_00254808.jpg +Places365_test_00254825.jpg +Places365_test_00254843.jpg +Places365_test_00254850.jpg +Places365_test_00254854.jpg +Places365_test_00254855.jpg +Places365_test_00254858.jpg +Places365_test_00254870.jpg +Places365_test_00254873.jpg +Places365_test_00254895.jpg +Places365_test_00254904.jpg +Places365_test_00254923.jpg +Places365_test_00254937.jpg +Places365_test_00254951.jpg +Places365_test_00254957.jpg +Places365_test_00254967.jpg +Places365_test_00254987.jpg +Places365_test_00254999.jpg +Places365_test_00255000.jpg +Places365_test_00255002.jpg +Places365_test_00255014.jpg +Places365_test_00255023.jpg +Places365_test_00255072.jpg +Places365_test_00255088.jpg +Places365_test_00255098.jpg +Places365_test_00255103.jpg +Places365_test_00255111.jpg +Places365_test_00255115.jpg +Places365_test_00255128.jpg +Places365_test_00255129.jpg +Places365_test_00255132.jpg +Places365_test_00255135.jpg +Places365_test_00255142.jpg +Places365_test_00255143.jpg +Places365_test_00255144.jpg +Places365_test_00255168.jpg +Places365_test_00255178.jpg +Places365_test_00255222.jpg +Places365_test_00255247.jpg +Places365_test_00255248.jpg +Places365_test_00255250.jpg +Places365_test_00255260.jpg +Places365_test_00255264.jpg 
+Places365_test_00255268.jpg +Places365_test_00255279.jpg +Places365_test_00255303.jpg +Places365_test_00255313.jpg +Places365_test_00255318.jpg +Places365_test_00255320.jpg +Places365_test_00255337.jpg +Places365_test_00255342.jpg +Places365_test_00255351.jpg +Places365_test_00255359.jpg +Places365_test_00255377.jpg +Places365_test_00255378.jpg +Places365_test_00255394.jpg +Places365_test_00255403.jpg +Places365_test_00255433.jpg +Places365_test_00255445.jpg +Places365_test_00255462.jpg +Places365_test_00255475.jpg +Places365_test_00255482.jpg +Places365_test_00255487.jpg +Places365_test_00255492.jpg +Places365_test_00255495.jpg +Places365_test_00255499.jpg +Places365_test_00255510.jpg +Places365_test_00255511.jpg +Places365_test_00255520.jpg +Places365_test_00255523.jpg +Places365_test_00255524.jpg +Places365_test_00255531.jpg +Places365_test_00255538.jpg +Places365_test_00255555.jpg +Places365_test_00255579.jpg +Places365_test_00255592.jpg +Places365_test_00255613.jpg +Places365_test_00255637.jpg +Places365_test_00255667.jpg +Places365_test_00255674.jpg +Places365_test_00255686.jpg +Places365_test_00255692.jpg +Places365_test_00255696.jpg +Places365_test_00255698.jpg +Places365_test_00255699.jpg +Places365_test_00255712.jpg +Places365_test_00255722.jpg +Places365_test_00255726.jpg +Places365_test_00255741.jpg +Places365_test_00255756.jpg +Places365_test_00255775.jpg +Places365_test_00255780.jpg +Places365_test_00255783.jpg +Places365_test_00255789.jpg +Places365_test_00255792.jpg +Places365_test_00255795.jpg +Places365_test_00255803.jpg +Places365_test_00255811.jpg +Places365_test_00255818.jpg +Places365_test_00255832.jpg +Places365_test_00255837.jpg +Places365_test_00255857.jpg +Places365_test_00255876.jpg +Places365_test_00255878.jpg +Places365_test_00255897.jpg +Places365_test_00255902.jpg +Places365_test_00255930.jpg +Places365_test_00255937.jpg +Places365_test_00255947.jpg +Places365_test_00255955.jpg +Places365_test_00255982.jpg +Places365_test_00255994.jpg +Places365_test_00256017.jpg +Places365_test_00256029.jpg +Places365_test_00256048.jpg +Places365_test_00256051.jpg +Places365_test_00256055.jpg +Places365_test_00256061.jpg +Places365_test_00256063.jpg +Places365_test_00256069.jpg +Places365_test_00256077.jpg +Places365_test_00256091.jpg +Places365_test_00256092.jpg +Places365_test_00256102.jpg +Places365_test_00256105.jpg +Places365_test_00256106.jpg +Places365_test_00256123.jpg +Places365_test_00256124.jpg +Places365_test_00256130.jpg +Places365_test_00256156.jpg +Places365_test_00256162.jpg +Places365_test_00256172.jpg +Places365_test_00256173.jpg +Places365_test_00256189.jpg +Places365_test_00256191.jpg +Places365_test_00256198.jpg +Places365_test_00256203.jpg +Places365_test_00256205.jpg +Places365_test_00256218.jpg +Places365_test_00256229.jpg +Places365_test_00256232.jpg +Places365_test_00256235.jpg +Places365_test_00256245.jpg +Places365_test_00256258.jpg +Places365_test_00256271.jpg +Places365_test_00256277.jpg +Places365_test_00256281.jpg +Places365_test_00256288.jpg +Places365_test_00256299.jpg +Places365_test_00256301.jpg +Places365_test_00256323.jpg +Places365_test_00256326.jpg +Places365_test_00256343.jpg +Places365_test_00256344.jpg +Places365_test_00256424.jpg +Places365_test_00256454.jpg +Places365_test_00256474.jpg +Places365_test_00256476.jpg +Places365_test_00256506.jpg +Places365_test_00256509.jpg +Places365_test_00256531.jpg +Places365_test_00256546.jpg +Places365_test_00256558.jpg +Places365_test_00256565.jpg +Places365_test_00256572.jpg 
+Places365_test_00256573.jpg +Places365_test_00256584.jpg +Places365_test_00256590.jpg +Places365_test_00256592.jpg +Places365_test_00256604.jpg +Places365_test_00256611.jpg +Places365_test_00256615.jpg +Places365_test_00256621.jpg +Places365_test_00256636.jpg +Places365_test_00256637.jpg +Places365_test_00256652.jpg +Places365_test_00256656.jpg +Places365_test_00256670.jpg +Places365_test_00256687.jpg +Places365_test_00256699.jpg +Places365_test_00256701.jpg +Places365_test_00256706.jpg +Places365_test_00256714.jpg +Places365_test_00256722.jpg +Places365_test_00256737.jpg +Places365_test_00256745.jpg +Places365_test_00256762.jpg +Places365_test_00256766.jpg +Places365_test_00256770.jpg +Places365_test_00256782.jpg +Places365_test_00256799.jpg +Places365_test_00256800.jpg +Places365_test_00256807.jpg +Places365_test_00256815.jpg +Places365_test_00256838.jpg +Places365_test_00256849.jpg +Places365_test_00256850.jpg +Places365_test_00256853.jpg +Places365_test_00256866.jpg +Places365_test_00256880.jpg +Places365_test_00256889.jpg +Places365_test_00256898.jpg +Places365_test_00256901.jpg +Places365_test_00256905.jpg +Places365_test_00256936.jpg +Places365_test_00256949.jpg +Places365_test_00256955.jpg +Places365_test_00256957.jpg +Places365_test_00256960.jpg +Places365_test_00256962.jpg +Places365_test_00256968.jpg +Places365_test_00257006.jpg +Places365_test_00257018.jpg +Places365_test_00257019.jpg +Places365_test_00257025.jpg +Places365_test_00257027.jpg +Places365_test_00257043.jpg +Places365_test_00257045.jpg +Places365_test_00257048.jpg +Places365_test_00257050.jpg +Places365_test_00257060.jpg +Places365_test_00257082.jpg +Places365_test_00257090.jpg +Places365_test_00257116.jpg +Places365_test_00257125.jpg +Places365_test_00257131.jpg +Places365_test_00257133.jpg +Places365_test_00257137.jpg +Places365_test_00257164.jpg +Places365_test_00257176.jpg +Places365_test_00257186.jpg +Places365_test_00257206.jpg +Places365_test_00257211.jpg +Places365_test_00257242.jpg +Places365_test_00257250.jpg +Places365_test_00257254.jpg +Places365_test_00257258.jpg +Places365_test_00257275.jpg +Places365_test_00257282.jpg +Places365_test_00257303.jpg +Places365_test_00257312.jpg +Places365_test_00257319.jpg +Places365_test_00257330.jpg +Places365_test_00257342.jpg +Places365_test_00257348.jpg +Places365_test_00257354.jpg +Places365_test_00257374.jpg +Places365_test_00257386.jpg +Places365_test_00257394.jpg +Places365_test_00257395.jpg +Places365_test_00257405.jpg +Places365_test_00257406.jpg +Places365_test_00257417.jpg +Places365_test_00257426.jpg +Places365_test_00257442.jpg +Places365_test_00257449.jpg +Places365_test_00257468.jpg +Places365_test_00257477.jpg +Places365_test_00257478.jpg +Places365_test_00257492.jpg +Places365_test_00257499.jpg +Places365_test_00257520.jpg +Places365_test_00257525.jpg +Places365_test_00257544.jpg +Places365_test_00257554.jpg +Places365_test_00257556.jpg +Places365_test_00257557.jpg +Places365_test_00257600.jpg +Places365_test_00257625.jpg +Places365_test_00257651.jpg +Places365_test_00257653.jpg +Places365_test_00257685.jpg +Places365_test_00257688.jpg +Places365_test_00257697.jpg +Places365_test_00257717.jpg +Places365_test_00257732.jpg +Places365_test_00257763.jpg +Places365_test_00257786.jpg +Places365_test_00257811.jpg +Places365_test_00257832.jpg +Places365_test_00257834.jpg +Places365_test_00257862.jpg +Places365_test_00257867.jpg +Places365_test_00257877.jpg +Places365_test_00257888.jpg +Places365_test_00257916.jpg +Places365_test_00257920.jpg 
+Places365_test_00257925.jpg +Places365_test_00257929.jpg +Places365_test_00257932.jpg +Places365_test_00257935.jpg +Places365_test_00257945.jpg +Places365_test_00257952.jpg +Places365_test_00257955.jpg +Places365_test_00257956.jpg +Places365_test_00257971.jpg +Places365_test_00257973.jpg +Places365_test_00257986.jpg +Places365_test_00257993.jpg +Places365_test_00258002.jpg +Places365_test_00258005.jpg +Places365_test_00258011.jpg +Places365_test_00258012.jpg +Places365_test_00258030.jpg +Places365_test_00258036.jpg +Places365_test_00258038.jpg +Places365_test_00258049.jpg +Places365_test_00258053.jpg +Places365_test_00258058.jpg +Places365_test_00258066.jpg +Places365_test_00258070.jpg +Places365_test_00258073.jpg +Places365_test_00258079.jpg +Places365_test_00258094.jpg +Places365_test_00258101.jpg +Places365_test_00258110.jpg +Places365_test_00258116.jpg +Places365_test_00258123.jpg +Places365_test_00258127.jpg +Places365_test_00258134.jpg +Places365_test_00258139.jpg +Places365_test_00258155.jpg +Places365_test_00258158.jpg +Places365_test_00258173.jpg +Places365_test_00258174.jpg +Places365_test_00258176.jpg +Places365_test_00258180.jpg +Places365_test_00258184.jpg +Places365_test_00258190.jpg +Places365_test_00258210.jpg +Places365_test_00258222.jpg +Places365_test_00258234.jpg +Places365_test_00258238.jpg +Places365_test_00258244.jpg +Places365_test_00258256.jpg +Places365_test_00258273.jpg +Places365_test_00258280.jpg +Places365_test_00258284.jpg +Places365_test_00258287.jpg +Places365_test_00258298.jpg +Places365_test_00258303.jpg +Places365_test_00258333.jpg +Places365_test_00258345.jpg +Places365_test_00258351.jpg +Places365_test_00258356.jpg +Places365_test_00258361.jpg +Places365_test_00258376.jpg +Places365_test_00258395.jpg +Places365_test_00258404.jpg +Places365_test_00258410.jpg +Places365_test_00258411.jpg +Places365_test_00258421.jpg +Places365_test_00258437.jpg +Places365_test_00258450.jpg +Places365_test_00258454.jpg +Places365_test_00258469.jpg +Places365_test_00258483.jpg +Places365_test_00258487.jpg +Places365_test_00258488.jpg +Places365_test_00258489.jpg +Places365_test_00258506.jpg +Places365_test_00258515.jpg +Places365_test_00258559.jpg +Places365_test_00258587.jpg +Places365_test_00258601.jpg +Places365_test_00258603.jpg +Places365_test_00258626.jpg +Places365_test_00258630.jpg +Places365_test_00258632.jpg +Places365_test_00258646.jpg +Places365_test_00258666.jpg +Places365_test_00258667.jpg +Places365_test_00258670.jpg +Places365_test_00258697.jpg +Places365_test_00258707.jpg +Places365_test_00258744.jpg +Places365_test_00258772.jpg +Places365_test_00258776.jpg +Places365_test_00258778.jpg +Places365_test_00258804.jpg +Places365_test_00258813.jpg +Places365_test_00258820.jpg +Places365_test_00258827.jpg +Places365_test_00258841.jpg +Places365_test_00258843.jpg +Places365_test_00258862.jpg +Places365_test_00258865.jpg +Places365_test_00258868.jpg +Places365_test_00258882.jpg +Places365_test_00258897.jpg +Places365_test_00258911.jpg +Places365_test_00258918.jpg +Places365_test_00258936.jpg +Places365_test_00258938.jpg +Places365_test_00259012.jpg +Places365_test_00259016.jpg +Places365_test_00259048.jpg +Places365_test_00259057.jpg +Places365_test_00259067.jpg +Places365_test_00259069.jpg +Places365_test_00259076.jpg +Places365_test_00259086.jpg +Places365_test_00259094.jpg +Places365_test_00259113.jpg +Places365_test_00259114.jpg +Places365_test_00259117.jpg +Places365_test_00259130.jpg +Places365_test_00259140.jpg +Places365_test_00259146.jpg 
+Places365_test_00259152.jpg +Places365_test_00259153.jpg +Places365_test_00259161.jpg +Places365_test_00259172.jpg +Places365_test_00259174.jpg +Places365_test_00259175.jpg +Places365_test_00259194.jpg +Places365_test_00259195.jpg +Places365_test_00259202.jpg +Places365_test_00259232.jpg +Places365_test_00259243.jpg +Places365_test_00259253.jpg +Places365_test_00259260.jpg +Places365_test_00259262.jpg +Places365_test_00259278.jpg +Places365_test_00259295.jpg +Places365_test_00259306.jpg +Places365_test_00259330.jpg +Places365_test_00259332.jpg +Places365_test_00259336.jpg +Places365_test_00259340.jpg +Places365_test_00259348.jpg +Places365_test_00259364.jpg +Places365_test_00259366.jpg +Places365_test_00259391.jpg +Places365_test_00259418.jpg +Places365_test_00259429.jpg +Places365_test_00259439.jpg +Places365_test_00259443.jpg +Places365_test_00259444.jpg +Places365_test_00259461.jpg +Places365_test_00259464.jpg +Places365_test_00259472.jpg +Places365_test_00259479.jpg +Places365_test_00259493.jpg +Places365_test_00259499.jpg +Places365_test_00259526.jpg +Places365_test_00259530.jpg +Places365_test_00259534.jpg +Places365_test_00259535.jpg +Places365_test_00259542.jpg +Places365_test_00259549.jpg +Places365_test_00259582.jpg +Places365_test_00259611.jpg +Places365_test_00259617.jpg +Places365_test_00259632.jpg +Places365_test_00259656.jpg +Places365_test_00259660.jpg +Places365_test_00259661.jpg +Places365_test_00259664.jpg +Places365_test_00259669.jpg +Places365_test_00259687.jpg +Places365_test_00259696.jpg +Places365_test_00259701.jpg +Places365_test_00259733.jpg +Places365_test_00259738.jpg +Places365_test_00259741.jpg +Places365_test_00259753.jpg +Places365_test_00259759.jpg +Places365_test_00259770.jpg +Places365_test_00259772.jpg +Places365_test_00259779.jpg +Places365_test_00259782.jpg +Places365_test_00259808.jpg +Places365_test_00259822.jpg +Places365_test_00259830.jpg +Places365_test_00259856.jpg +Places365_test_00259861.jpg +Places365_test_00259870.jpg +Places365_test_00259881.jpg +Places365_test_00259883.jpg +Places365_test_00259885.jpg +Places365_test_00259890.jpg +Places365_test_00259898.jpg +Places365_test_00259909.jpg +Places365_test_00259918.jpg +Places365_test_00259939.jpg +Places365_test_00259968.jpg +Places365_test_00259979.jpg +Places365_test_00259983.jpg +Places365_test_00259988.jpg +Places365_test_00260000.jpg +Places365_test_00260004.jpg +Places365_test_00260006.jpg +Places365_test_00260012.jpg +Places365_test_00260018.jpg +Places365_test_00260024.jpg +Places365_test_00260028.jpg +Places365_test_00260030.jpg +Places365_test_00260052.jpg +Places365_test_00260059.jpg +Places365_test_00260067.jpg +Places365_test_00260084.jpg +Places365_test_00260086.jpg +Places365_test_00260106.jpg +Places365_test_00260114.jpg +Places365_test_00260123.jpg +Places365_test_00260124.jpg +Places365_test_00260142.jpg +Places365_test_00260145.jpg +Places365_test_00260156.jpg +Places365_test_00260167.jpg +Places365_test_00260172.jpg +Places365_test_00260187.jpg +Places365_test_00260191.jpg +Places365_test_00260216.jpg +Places365_test_00260218.jpg +Places365_test_00260231.jpg +Places365_test_00260241.jpg +Places365_test_00260264.jpg +Places365_test_00260265.jpg +Places365_test_00260269.jpg +Places365_test_00260291.jpg +Places365_test_00260305.jpg +Places365_test_00260345.jpg +Places365_test_00260347.jpg +Places365_test_00260352.jpg +Places365_test_00260368.jpg +Places365_test_00260380.jpg +Places365_test_00260405.jpg +Places365_test_00260406.jpg +Places365_test_00260415.jpg 
+Places365_test_00260421.jpg +Places365_test_00260425.jpg +Places365_test_00260429.jpg +Places365_test_00260435.jpg +Places365_test_00260462.jpg +Places365_test_00260472.jpg +Places365_test_00260485.jpg +Places365_test_00260488.jpg +Places365_test_00260507.jpg +Places365_test_00260525.jpg +Places365_test_00260526.jpg +Places365_test_00260532.jpg +Places365_test_00260553.jpg +Places365_test_00260571.jpg +Places365_test_00260593.jpg +Places365_test_00260597.jpg +Places365_test_00260598.jpg +Places365_test_00260600.jpg +Places365_test_00260603.jpg +Places365_test_00260617.jpg +Places365_test_00260622.jpg +Places365_test_00260623.jpg +Places365_test_00260624.jpg +Places365_test_00260640.jpg +Places365_test_00260643.jpg +Places365_test_00260646.jpg +Places365_test_00260657.jpg +Places365_test_00260670.jpg +Places365_test_00260674.jpg +Places365_test_00260676.jpg +Places365_test_00260684.jpg +Places365_test_00260686.jpg +Places365_test_00260696.jpg +Places365_test_00260709.jpg +Places365_test_00260714.jpg +Places365_test_00260725.jpg +Places365_test_00260729.jpg +Places365_test_00260735.jpg +Places365_test_00260743.jpg +Places365_test_00260753.jpg +Places365_test_00260756.jpg +Places365_test_00260758.jpg +Places365_test_00260762.jpg +Places365_test_00260772.jpg +Places365_test_00260783.jpg +Places365_test_00260791.jpg +Places365_test_00260809.jpg +Places365_test_00260820.jpg +Places365_test_00260842.jpg +Places365_test_00260845.jpg +Places365_test_00260846.jpg +Places365_test_00260847.jpg +Places365_test_00260855.jpg +Places365_test_00260863.jpg +Places365_test_00260875.jpg +Places365_test_00260891.jpg +Places365_test_00260896.jpg +Places365_test_00260934.jpg +Places365_test_00260940.jpg +Places365_test_00260957.jpg +Places365_test_00260973.jpg +Places365_test_00260974.jpg +Places365_test_00260994.jpg +Places365_test_00260999.jpg +Places365_test_00261000.jpg +Places365_test_00261001.jpg +Places365_test_00261011.jpg +Places365_test_00261023.jpg +Places365_test_00261035.jpg +Places365_test_00261041.jpg +Places365_test_00261043.jpg +Places365_test_00261056.jpg +Places365_test_00261062.jpg +Places365_test_00261073.jpg +Places365_test_00261082.jpg +Places365_test_00261087.jpg +Places365_test_00261097.jpg +Places365_test_00261107.jpg +Places365_test_00261119.jpg +Places365_test_00261144.jpg +Places365_test_00261157.jpg +Places365_test_00261187.jpg +Places365_test_00261188.jpg +Places365_test_00261190.jpg +Places365_test_00261193.jpg +Places365_test_00261201.jpg +Places365_test_00261212.jpg +Places365_test_00261252.jpg +Places365_test_00261255.jpg +Places365_test_00261258.jpg +Places365_test_00261265.jpg +Places365_test_00261294.jpg +Places365_test_00261310.jpg +Places365_test_00261314.jpg +Places365_test_00261341.jpg +Places365_test_00261342.jpg +Places365_test_00261345.jpg +Places365_test_00261352.jpg +Places365_test_00261355.jpg +Places365_test_00261404.jpg +Places365_test_00261405.jpg +Places365_test_00261418.jpg +Places365_test_00261423.jpg +Places365_test_00261450.jpg +Places365_test_00261462.jpg +Places365_test_00261483.jpg +Places365_test_00261486.jpg +Places365_test_00261507.jpg +Places365_test_00261512.jpg +Places365_test_00261519.jpg +Places365_test_00261520.jpg +Places365_test_00261526.jpg +Places365_test_00261530.jpg +Places365_test_00261534.jpg +Places365_test_00261540.jpg +Places365_test_00261546.jpg +Places365_test_00261548.jpg +Places365_test_00261559.jpg +Places365_test_00261570.jpg +Places365_test_00261603.jpg +Places365_test_00261604.jpg +Places365_test_00261612.jpg 
+Places365_test_00261621.jpg +Places365_test_00261626.jpg +Places365_test_00261661.jpg +Places365_test_00261663.jpg +Places365_test_00261668.jpg +Places365_test_00261675.jpg +Places365_test_00261676.jpg +Places365_test_00261683.jpg +Places365_test_00261686.jpg +Places365_test_00261701.jpg +Places365_test_00261705.jpg +Places365_test_00261719.jpg +Places365_test_00261733.jpg +Places365_test_00261742.jpg +Places365_test_00261745.jpg +Places365_test_00261746.jpg +Places365_test_00261766.jpg +Places365_test_00261788.jpg +Places365_test_00261793.jpg +Places365_test_00261816.jpg +Places365_test_00261883.jpg +Places365_test_00261897.jpg +Places365_test_00261921.jpg +Places365_test_00261928.jpg +Places365_test_00261966.jpg +Places365_test_00261968.jpg +Places365_test_00261970.jpg +Places365_test_00261973.jpg +Places365_test_00261976.jpg +Places365_test_00261990.jpg +Places365_test_00262003.jpg +Places365_test_00262014.jpg +Places365_test_00262021.jpg +Places365_test_00262027.jpg +Places365_test_00262028.jpg +Places365_test_00262061.jpg +Places365_test_00262068.jpg +Places365_test_00262092.jpg +Places365_test_00262101.jpg +Places365_test_00262115.jpg +Places365_test_00262124.jpg +Places365_test_00262125.jpg +Places365_test_00262151.jpg +Places365_test_00262165.jpg +Places365_test_00262170.jpg +Places365_test_00262175.jpg +Places365_test_00262198.jpg +Places365_test_00262208.jpg +Places365_test_00262213.jpg +Places365_test_00262215.jpg +Places365_test_00262233.jpg +Places365_test_00262237.jpg +Places365_test_00262250.jpg +Places365_test_00262263.jpg +Places365_test_00262276.jpg +Places365_test_00262280.jpg +Places365_test_00262284.jpg +Places365_test_00262297.jpg +Places365_test_00262299.jpg +Places365_test_00262311.jpg +Places365_test_00262334.jpg +Places365_test_00262338.jpg +Places365_test_00262356.jpg +Places365_test_00262358.jpg +Places365_test_00262360.jpg +Places365_test_00262361.jpg +Places365_test_00262364.jpg +Places365_test_00262374.jpg +Places365_test_00262383.jpg +Places365_test_00262396.jpg +Places365_test_00262409.jpg +Places365_test_00262410.jpg +Places365_test_00262414.jpg +Places365_test_00262416.jpg +Places365_test_00262418.jpg +Places365_test_00262423.jpg +Places365_test_00262437.jpg +Places365_test_00262455.jpg +Places365_test_00262468.jpg +Places365_test_00262472.jpg +Places365_test_00262488.jpg +Places365_test_00262503.jpg +Places365_test_00262509.jpg +Places365_test_00262510.jpg +Places365_test_00262537.jpg +Places365_test_00262542.jpg +Places365_test_00262549.jpg +Places365_test_00262567.jpg +Places365_test_00262574.jpg +Places365_test_00262595.jpg +Places365_test_00262596.jpg +Places365_test_00262610.jpg +Places365_test_00262615.jpg +Places365_test_00262640.jpg +Places365_test_00262644.jpg +Places365_test_00262645.jpg +Places365_test_00262646.jpg +Places365_test_00262648.jpg +Places365_test_00262654.jpg +Places365_test_00262659.jpg +Places365_test_00262660.jpg +Places365_test_00262663.jpg +Places365_test_00262668.jpg +Places365_test_00262674.jpg +Places365_test_00262676.jpg +Places365_test_00262690.jpg +Places365_test_00262709.jpg +Places365_test_00262721.jpg +Places365_test_00262722.jpg +Places365_test_00262730.jpg +Places365_test_00262732.jpg +Places365_test_00262753.jpg +Places365_test_00262762.jpg +Places365_test_00262766.jpg +Places365_test_00262800.jpg +Places365_test_00262811.jpg +Places365_test_00262821.jpg +Places365_test_00262838.jpg +Places365_test_00262840.jpg +Places365_test_00262849.jpg +Places365_test_00262851.jpg +Places365_test_00262852.jpg 
+Places365_test_00262855.jpg +Places365_test_00262856.jpg +Places365_test_00262879.jpg +Places365_test_00262891.jpg +Places365_test_00262898.jpg +Places365_test_00262906.jpg +Places365_test_00262908.jpg +Places365_test_00262909.jpg +Places365_test_00262912.jpg +Places365_test_00262925.jpg +Places365_test_00262927.jpg +Places365_test_00262929.jpg +Places365_test_00262934.jpg +Places365_test_00262937.jpg +Places365_test_00262979.jpg +Places365_test_00262984.jpg +Places365_test_00262992.jpg +Places365_test_00262995.jpg +Places365_test_00263021.jpg +Places365_test_00263022.jpg +Places365_test_00263031.jpg +Places365_test_00263035.jpg +Places365_test_00263036.jpg +Places365_test_00263040.jpg +Places365_test_00263046.jpg +Places365_test_00263050.jpg +Places365_test_00263054.jpg +Places365_test_00263067.jpg +Places365_test_00263079.jpg +Places365_test_00263099.jpg +Places365_test_00263103.jpg +Places365_test_00263112.jpg +Places365_test_00263129.jpg +Places365_test_00263134.jpg +Places365_test_00263140.jpg +Places365_test_00263166.jpg +Places365_test_00263173.jpg +Places365_test_00263203.jpg +Places365_test_00263211.jpg +Places365_test_00263216.jpg +Places365_test_00263250.jpg +Places365_test_00263260.jpg +Places365_test_00263264.jpg +Places365_test_00263276.jpg +Places365_test_00263287.jpg +Places365_test_00263294.jpg +Places365_test_00263312.jpg +Places365_test_00263333.jpg +Places365_test_00263343.jpg +Places365_test_00263350.jpg +Places365_test_00263361.jpg +Places365_test_00263366.jpg +Places365_test_00263376.jpg +Places365_test_00263380.jpg +Places365_test_00263388.jpg +Places365_test_00263389.jpg +Places365_test_00263390.jpg +Places365_test_00263393.jpg +Places365_test_00263400.jpg +Places365_test_00263406.jpg +Places365_test_00263420.jpg +Places365_test_00263445.jpg +Places365_test_00263452.jpg +Places365_test_00263455.jpg +Places365_test_00263458.jpg +Places365_test_00263469.jpg +Places365_test_00263480.jpg +Places365_test_00263486.jpg +Places365_test_00263507.jpg +Places365_test_00263513.jpg +Places365_test_00263525.jpg +Places365_test_00263540.jpg +Places365_test_00263550.jpg +Places365_test_00263578.jpg +Places365_test_00263592.jpg +Places365_test_00263603.jpg +Places365_test_00263605.jpg +Places365_test_00263608.jpg +Places365_test_00263609.jpg +Places365_test_00263620.jpg +Places365_test_00263634.jpg +Places365_test_00263643.jpg +Places365_test_00263663.jpg +Places365_test_00263667.jpg +Places365_test_00263674.jpg +Places365_test_00263703.jpg +Places365_test_00263710.jpg +Places365_test_00263713.jpg +Places365_test_00263740.jpg +Places365_test_00263766.jpg +Places365_test_00263770.jpg +Places365_test_00263778.jpg +Places365_test_00263779.jpg +Places365_test_00263785.jpg +Places365_test_00263797.jpg +Places365_test_00263807.jpg +Places365_test_00263820.jpg +Places365_test_00263826.jpg +Places365_test_00263832.jpg +Places365_test_00263834.jpg +Places365_test_00263836.jpg +Places365_test_00263845.jpg +Places365_test_00263846.jpg +Places365_test_00263857.jpg +Places365_test_00263870.jpg +Places365_test_00263886.jpg +Places365_test_00263905.jpg +Places365_test_00263916.jpg +Places365_test_00263921.jpg +Places365_test_00263934.jpg +Places365_test_00263951.jpg +Places365_test_00263954.jpg +Places365_test_00263973.jpg +Places365_test_00263988.jpg +Places365_test_00263995.jpg +Places365_test_00264008.jpg +Places365_test_00264019.jpg +Places365_test_00264023.jpg +Places365_test_00264024.jpg +Places365_test_00264035.jpg +Places365_test_00264043.jpg +Places365_test_00264049.jpg 
+Places365_test_00264069.jpg +Places365_test_00264070.jpg +Places365_test_00264074.jpg +Places365_test_00264095.jpg +Places365_test_00264106.jpg +Places365_test_00264115.jpg +Places365_test_00264147.jpg +Places365_test_00264169.jpg +Places365_test_00264179.jpg +Places365_test_00264182.jpg +Places365_test_00264199.jpg +Places365_test_00264210.jpg +Places365_test_00264221.jpg +Places365_test_00264223.jpg +Places365_test_00264231.jpg +Places365_test_00264235.jpg +Places365_test_00264246.jpg +Places365_test_00264254.jpg +Places365_test_00264257.jpg +Places365_test_00264264.jpg +Places365_test_00264267.jpg +Places365_test_00264270.jpg +Places365_test_00264271.jpg +Places365_test_00264283.jpg +Places365_test_00264305.jpg +Places365_test_00264330.jpg +Places365_test_00264349.jpg +Places365_test_00264353.jpg +Places365_test_00264359.jpg +Places365_test_00264360.jpg +Places365_test_00264361.jpg +Places365_test_00264369.jpg +Places365_test_00264379.jpg +Places365_test_00264392.jpg +Places365_test_00264397.jpg +Places365_test_00264400.jpg +Places365_test_00264403.jpg +Places365_test_00264411.jpg +Places365_test_00264414.jpg +Places365_test_00264415.jpg +Places365_test_00264419.jpg +Places365_test_00264436.jpg +Places365_test_00264445.jpg +Places365_test_00264446.jpg +Places365_test_00264459.jpg +Places365_test_00264462.jpg +Places365_test_00264464.jpg +Places365_test_00264467.jpg +Places365_test_00264488.jpg +Places365_test_00264504.jpg +Places365_test_00264512.jpg +Places365_test_00264513.jpg +Places365_test_00264523.jpg +Places365_test_00264550.jpg +Places365_test_00264553.jpg +Places365_test_00264566.jpg +Places365_test_00264580.jpg +Places365_test_00264585.jpg +Places365_test_00264589.jpg +Places365_test_00264602.jpg +Places365_test_00264621.jpg +Places365_test_00264631.jpg +Places365_test_00264636.jpg +Places365_test_00264640.jpg +Places365_test_00264641.jpg +Places365_test_00264642.jpg +Places365_test_00264648.jpg +Places365_test_00264652.jpg +Places365_test_00264653.jpg +Places365_test_00264676.jpg +Places365_test_00264682.jpg +Places365_test_00264718.jpg +Places365_test_00264747.jpg +Places365_test_00264754.jpg +Places365_test_00264755.jpg +Places365_test_00264767.jpg +Places365_test_00264769.jpg +Places365_test_00264783.jpg +Places365_test_00264788.jpg +Places365_test_00264794.jpg +Places365_test_00264798.jpg +Places365_test_00264807.jpg +Places365_test_00264811.jpg +Places365_test_00264813.jpg +Places365_test_00264815.jpg +Places365_test_00264845.jpg +Places365_test_00264854.jpg +Places365_test_00264859.jpg +Places365_test_00264887.jpg +Places365_test_00264899.jpg +Places365_test_00264907.jpg +Places365_test_00264921.jpg +Places365_test_00264925.jpg +Places365_test_00264926.jpg +Places365_test_00264939.jpg +Places365_test_00264952.jpg +Places365_test_00264977.jpg +Places365_test_00264985.jpg +Places365_test_00264990.jpg +Places365_test_00264992.jpg +Places365_test_00265001.jpg +Places365_test_00265023.jpg +Places365_test_00265029.jpg +Places365_test_00265032.jpg +Places365_test_00265040.jpg +Places365_test_00265042.jpg +Places365_test_00265056.jpg +Places365_test_00265057.jpg +Places365_test_00265066.jpg +Places365_test_00265091.jpg +Places365_test_00265105.jpg +Places365_test_00265109.jpg +Places365_test_00265116.jpg +Places365_test_00265129.jpg +Places365_test_00265154.jpg +Places365_test_00265183.jpg +Places365_test_00265189.jpg +Places365_test_00265217.jpg +Places365_test_00265242.jpg +Places365_test_00265244.jpg +Places365_test_00265250.jpg +Places365_test_00265259.jpg 
+Places365_test_00265263.jpg +Places365_test_00265281.jpg +Places365_test_00265285.jpg +Places365_test_00265303.jpg +Places365_test_00265304.jpg +Places365_test_00265309.jpg +Places365_test_00265333.jpg +Places365_test_00265337.jpg +Places365_test_00265356.jpg +Places365_test_00265362.jpg +Places365_test_00265392.jpg +Places365_test_00265395.jpg +Places365_test_00265397.jpg +Places365_test_00265406.jpg +Places365_test_00265412.jpg +Places365_test_00265436.jpg +Places365_test_00265447.jpg +Places365_test_00265477.jpg +Places365_test_00265493.jpg +Places365_test_00265497.jpg +Places365_test_00265512.jpg +Places365_test_00265516.jpg +Places365_test_00265517.jpg +Places365_test_00265519.jpg +Places365_test_00265537.jpg +Places365_test_00265542.jpg +Places365_test_00265567.jpg +Places365_test_00265573.jpg +Places365_test_00265578.jpg +Places365_test_00265593.jpg +Places365_test_00265595.jpg +Places365_test_00265605.jpg +Places365_test_00265609.jpg +Places365_test_00265617.jpg +Places365_test_00265622.jpg +Places365_test_00265635.jpg +Places365_test_00265662.jpg +Places365_test_00265667.jpg +Places365_test_00265686.jpg +Places365_test_00265703.jpg +Places365_test_00265708.jpg +Places365_test_00265710.jpg +Places365_test_00265714.jpg +Places365_test_00265716.jpg +Places365_test_00265722.jpg +Places365_test_00265723.jpg +Places365_test_00265731.jpg +Places365_test_00265734.jpg +Places365_test_00265737.jpg +Places365_test_00265740.jpg +Places365_test_00265741.jpg +Places365_test_00265761.jpg +Places365_test_00265776.jpg +Places365_test_00265779.jpg +Places365_test_00265781.jpg +Places365_test_00265786.jpg +Places365_test_00265790.jpg +Places365_test_00265802.jpg +Places365_test_00265822.jpg +Places365_test_00265840.jpg +Places365_test_00265856.jpg +Places365_test_00265873.jpg +Places365_test_00265888.jpg +Places365_test_00265890.jpg +Places365_test_00265918.jpg +Places365_test_00265919.jpg +Places365_test_00265928.jpg +Places365_test_00265942.jpg +Places365_test_00265961.jpg +Places365_test_00265967.jpg +Places365_test_00265968.jpg +Places365_test_00265971.jpg +Places365_test_00265976.jpg +Places365_test_00266000.jpg +Places365_test_00266019.jpg +Places365_test_00266024.jpg +Places365_test_00266042.jpg +Places365_test_00266048.jpg +Places365_test_00266074.jpg +Places365_test_00266082.jpg +Places365_test_00266100.jpg +Places365_test_00266114.jpg +Places365_test_00266125.jpg +Places365_test_00266126.jpg +Places365_test_00266144.jpg +Places365_test_00266156.jpg +Places365_test_00266160.jpg +Places365_test_00266161.jpg +Places365_test_00266175.jpg +Places365_test_00266183.jpg +Places365_test_00266199.jpg +Places365_test_00266218.jpg +Places365_test_00266226.jpg +Places365_test_00266228.jpg +Places365_test_00266237.jpg +Places365_test_00266238.jpg +Places365_test_00266240.jpg +Places365_test_00266243.jpg +Places365_test_00266255.jpg +Places365_test_00266258.jpg +Places365_test_00266263.jpg +Places365_test_00266266.jpg +Places365_test_00266273.jpg +Places365_test_00266279.jpg +Places365_test_00266298.jpg +Places365_test_00266299.jpg +Places365_test_00266304.jpg +Places365_test_00266306.jpg +Places365_test_00266333.jpg +Places365_test_00266341.jpg +Places365_test_00266344.jpg +Places365_test_00266346.jpg +Places365_test_00266351.jpg +Places365_test_00266359.jpg +Places365_test_00266361.jpg +Places365_test_00266367.jpg +Places365_test_00266370.jpg +Places365_test_00266377.jpg +Places365_test_00266389.jpg +Places365_test_00266401.jpg +Places365_test_00266403.jpg +Places365_test_00266404.jpg 
+Places365_test_00266408.jpg +Places365_test_00266417.jpg +Places365_test_00266427.jpg +Places365_test_00266428.jpg +Places365_test_00266430.jpg +Places365_test_00266448.jpg +Places365_test_00266458.jpg +Places365_test_00266459.jpg +Places365_test_00266478.jpg +Places365_test_00266479.jpg +Places365_test_00266486.jpg +Places365_test_00266492.jpg +Places365_test_00266497.jpg +Places365_test_00266500.jpg +Places365_test_00266539.jpg +Places365_test_00266542.jpg +Places365_test_00266550.jpg +Places365_test_00266569.jpg +Places365_test_00266572.jpg +Places365_test_00266595.jpg +Places365_test_00266596.jpg +Places365_test_00266601.jpg +Places365_test_00266619.jpg +Places365_test_00266625.jpg +Places365_test_00266628.jpg +Places365_test_00266672.jpg +Places365_test_00266680.jpg +Places365_test_00266690.jpg +Places365_test_00266704.jpg +Places365_test_00266707.jpg +Places365_test_00266715.jpg +Places365_test_00266734.jpg +Places365_test_00266735.jpg +Places365_test_00266736.jpg +Places365_test_00266754.jpg +Places365_test_00266764.jpg +Places365_test_00266780.jpg +Places365_test_00266783.jpg +Places365_test_00266785.jpg +Places365_test_00266806.jpg +Places365_test_00266858.jpg +Places365_test_00266860.jpg +Places365_test_00266865.jpg +Places365_test_00266872.jpg +Places365_test_00266876.jpg +Places365_test_00266882.jpg +Places365_test_00266884.jpg +Places365_test_00266894.jpg +Places365_test_00266909.jpg +Places365_test_00266917.jpg +Places365_test_00266920.jpg +Places365_test_00266959.jpg +Places365_test_00266968.jpg +Places365_test_00266975.jpg +Places365_test_00266981.jpg +Places365_test_00266995.jpg +Places365_test_00267009.jpg +Places365_test_00267010.jpg +Places365_test_00267030.jpg +Places365_test_00267051.jpg +Places365_test_00267066.jpg +Places365_test_00267075.jpg +Places365_test_00267079.jpg +Places365_test_00267082.jpg +Places365_test_00267084.jpg +Places365_test_00267086.jpg +Places365_test_00267088.jpg +Places365_test_00267093.jpg +Places365_test_00267096.jpg +Places365_test_00267099.jpg +Places365_test_00267100.jpg +Places365_test_00267103.jpg +Places365_test_00267114.jpg +Places365_test_00267126.jpg +Places365_test_00267133.jpg +Places365_test_00267138.jpg +Places365_test_00267146.jpg +Places365_test_00267162.jpg +Places365_test_00267178.jpg +Places365_test_00267181.jpg +Places365_test_00267183.jpg +Places365_test_00267194.jpg +Places365_test_00267209.jpg +Places365_test_00267226.jpg +Places365_test_00267242.jpg +Places365_test_00267255.jpg +Places365_test_00267267.jpg +Places365_test_00267272.jpg +Places365_test_00267274.jpg +Places365_test_00267279.jpg +Places365_test_00267313.jpg +Places365_test_00267314.jpg +Places365_test_00267323.jpg +Places365_test_00267337.jpg +Places365_test_00267371.jpg +Places365_test_00267377.jpg +Places365_test_00267384.jpg +Places365_test_00267390.jpg +Places365_test_00267414.jpg +Places365_test_00267428.jpg +Places365_test_00267434.jpg +Places365_test_00267453.jpg +Places365_test_00267458.jpg +Places365_test_00267461.jpg +Places365_test_00267470.jpg +Places365_test_00267473.jpg +Places365_test_00267487.jpg +Places365_test_00267499.jpg +Places365_test_00267505.jpg +Places365_test_00267525.jpg +Places365_test_00267543.jpg +Places365_test_00267554.jpg +Places365_test_00267558.jpg +Places365_test_00267559.jpg +Places365_test_00267572.jpg +Places365_test_00267575.jpg +Places365_test_00267582.jpg +Places365_test_00267583.jpg +Places365_test_00267590.jpg +Places365_test_00267596.jpg +Places365_test_00267606.jpg +Places365_test_00267612.jpg 
+Places365_test_00267639.jpg +Places365_test_00267650.jpg +Places365_test_00267651.jpg +Places365_test_00267660.jpg +Places365_test_00267662.jpg +Places365_test_00267666.jpg +Places365_test_00267678.jpg +Places365_test_00267693.jpg +Places365_test_00267707.jpg +Places365_test_00267718.jpg +Places365_test_00267725.jpg +Places365_test_00267744.jpg +Places365_test_00267746.jpg +Places365_test_00267762.jpg +Places365_test_00267776.jpg +Places365_test_00267784.jpg +Places365_test_00267789.jpg +Places365_test_00267812.jpg +Places365_test_00267833.jpg +Places365_test_00267840.jpg +Places365_test_00267855.jpg +Places365_test_00267868.jpg +Places365_test_00267872.jpg +Places365_test_00267875.jpg +Places365_test_00267890.jpg +Places365_test_00267901.jpg +Places365_test_00267917.jpg +Places365_test_00267927.jpg +Places365_test_00267936.jpg +Places365_test_00267945.jpg +Places365_test_00267949.jpg +Places365_test_00267970.jpg +Places365_test_00267973.jpg +Places365_test_00267996.jpg +Places365_test_00268012.jpg +Places365_test_00268034.jpg +Places365_test_00268042.jpg +Places365_test_00268049.jpg +Places365_test_00268079.jpg +Places365_test_00268091.jpg +Places365_test_00268094.jpg +Places365_test_00268117.jpg +Places365_test_00268125.jpg +Places365_test_00268160.jpg +Places365_test_00268168.jpg +Places365_test_00268169.jpg +Places365_test_00268178.jpg +Places365_test_00268190.jpg +Places365_test_00268198.jpg +Places365_test_00268202.jpg +Places365_test_00268220.jpg +Places365_test_00268225.jpg +Places365_test_00268231.jpg +Places365_test_00268240.jpg +Places365_test_00268261.jpg +Places365_test_00268282.jpg +Places365_test_00268311.jpg +Places365_test_00268322.jpg +Places365_test_00268323.jpg +Places365_test_00268340.jpg +Places365_test_00268350.jpg +Places365_test_00268354.jpg +Places365_test_00268391.jpg +Places365_test_00268393.jpg +Places365_test_00268469.jpg +Places365_test_00268529.jpg +Places365_test_00268538.jpg +Places365_test_00268595.jpg +Places365_test_00268608.jpg +Places365_test_00268629.jpg +Places365_test_00268638.jpg +Places365_test_00268643.jpg +Places365_test_00268646.jpg +Places365_test_00268656.jpg +Places365_test_00268661.jpg +Places365_test_00268665.jpg +Places365_test_00268695.jpg +Places365_test_00268696.jpg +Places365_test_00268699.jpg +Places365_test_00268708.jpg +Places365_test_00268709.jpg +Places365_test_00268726.jpg +Places365_test_00268733.jpg +Places365_test_00268768.jpg +Places365_test_00268773.jpg +Places365_test_00268776.jpg +Places365_test_00268779.jpg +Places365_test_00268784.jpg +Places365_test_00268794.jpg +Places365_test_00268795.jpg +Places365_test_00268796.jpg +Places365_test_00268820.jpg +Places365_test_00268822.jpg +Places365_test_00268842.jpg +Places365_test_00268868.jpg +Places365_test_00268882.jpg +Places365_test_00268897.jpg +Places365_test_00268903.jpg +Places365_test_00268904.jpg +Places365_test_00268921.jpg +Places365_test_00268929.jpg +Places365_test_00268945.jpg +Places365_test_00268947.jpg +Places365_test_00268950.jpg +Places365_test_00268980.jpg +Places365_test_00268985.jpg +Places365_test_00269003.jpg +Places365_test_00269058.jpg +Places365_test_00269067.jpg +Places365_test_00269075.jpg +Places365_test_00269078.jpg +Places365_test_00269092.jpg +Places365_test_00269118.jpg +Places365_test_00269119.jpg +Places365_test_00269124.jpg +Places365_test_00269127.jpg +Places365_test_00269140.jpg +Places365_test_00269171.jpg +Places365_test_00269181.jpg +Places365_test_00269200.jpg +Places365_test_00269205.jpg +Places365_test_00269209.jpg 
+Places365_test_00269223.jpg +Places365_test_00269230.jpg +Places365_test_00269239.jpg +Places365_test_00269253.jpg +Places365_test_00269275.jpg +Places365_test_00269278.jpg +Places365_test_00269279.jpg +Places365_test_00269286.jpg +Places365_test_00269309.jpg +Places365_test_00269317.jpg +Places365_test_00269327.jpg +Places365_test_00269343.jpg +Places365_test_00269344.jpg +Places365_test_00269352.jpg +Places365_test_00269358.jpg +Places365_test_00269371.jpg +Places365_test_00269432.jpg +Places365_test_00269441.jpg +Places365_test_00269456.jpg +Places365_test_00269480.jpg +Places365_test_00269486.jpg +Places365_test_00269496.jpg +Places365_test_00269500.jpg +Places365_test_00269502.jpg +Places365_test_00269505.jpg +Places365_test_00269512.jpg +Places365_test_00269519.jpg +Places365_test_00269531.jpg +Places365_test_00269536.jpg +Places365_test_00269539.jpg +Places365_test_00269546.jpg +Places365_test_00269552.jpg +Places365_test_00269555.jpg +Places365_test_00269566.jpg +Places365_test_00269586.jpg +Places365_test_00269606.jpg +Places365_test_00269621.jpg +Places365_test_00269639.jpg +Places365_test_00269653.jpg +Places365_test_00269654.jpg +Places365_test_00269656.jpg +Places365_test_00269660.jpg +Places365_test_00269662.jpg +Places365_test_00269678.jpg +Places365_test_00269679.jpg +Places365_test_00269699.jpg +Places365_test_00269703.jpg +Places365_test_00269706.jpg +Places365_test_00269723.jpg +Places365_test_00269757.jpg +Places365_test_00269763.jpg +Places365_test_00269774.jpg +Places365_test_00269796.jpg +Places365_test_00269800.jpg +Places365_test_00269812.jpg +Places365_test_00269815.jpg +Places365_test_00269839.jpg +Places365_test_00269840.jpg +Places365_test_00269844.jpg +Places365_test_00269850.jpg +Places365_test_00269871.jpg +Places365_test_00269876.jpg +Places365_test_00269882.jpg +Places365_test_00269888.jpg +Places365_test_00269895.jpg +Places365_test_00269901.jpg +Places365_test_00269904.jpg +Places365_test_00269942.jpg +Places365_test_00269956.jpg +Places365_test_00269957.jpg +Places365_test_00269968.jpg +Places365_test_00269973.jpg +Places365_test_00269997.jpg +Places365_test_00270005.jpg +Places365_test_00270009.jpg +Places365_test_00270012.jpg +Places365_test_00270018.jpg +Places365_test_00270063.jpg +Places365_test_00270069.jpg +Places365_test_00270072.jpg +Places365_test_00270082.jpg +Places365_test_00270089.jpg +Places365_test_00270091.jpg +Places365_test_00270095.jpg +Places365_test_00270102.jpg +Places365_test_00270109.jpg +Places365_test_00270119.jpg +Places365_test_00270128.jpg +Places365_test_00270161.jpg +Places365_test_00270169.jpg +Places365_test_00270185.jpg +Places365_test_00270193.jpg +Places365_test_00270218.jpg +Places365_test_00270232.jpg +Places365_test_00270233.jpg +Places365_test_00270259.jpg +Places365_test_00270265.jpg +Places365_test_00270271.jpg +Places365_test_00270279.jpg +Places365_test_00270280.jpg +Places365_test_00270286.jpg +Places365_test_00270290.jpg +Places365_test_00270294.jpg +Places365_test_00270305.jpg +Places365_test_00270319.jpg +Places365_test_00270322.jpg +Places365_test_00270346.jpg +Places365_test_00270354.jpg +Places365_test_00270364.jpg +Places365_test_00270374.jpg +Places365_test_00270380.jpg +Places365_test_00270389.jpg +Places365_test_00270391.jpg +Places365_test_00270393.jpg +Places365_test_00270399.jpg +Places365_test_00270431.jpg +Places365_test_00270436.jpg +Places365_test_00270438.jpg +Places365_test_00270470.jpg +Places365_test_00270474.jpg +Places365_test_00270476.jpg +Places365_test_00270492.jpg 
+Places365_test_00270494.jpg +Places365_test_00270502.jpg +Places365_test_00270514.jpg +Places365_test_00270521.jpg +Places365_test_00270543.jpg +Places365_test_00270554.jpg +Places365_test_00270566.jpg +Places365_test_00270580.jpg +Places365_test_00270582.jpg +Places365_test_00270584.jpg +Places365_test_00270609.jpg +Places365_test_00270621.jpg +Places365_test_00270624.jpg +Places365_test_00270645.jpg +Places365_test_00270647.jpg +Places365_test_00270659.jpg +Places365_test_00270660.jpg +Places365_test_00270664.jpg +Places365_test_00270679.jpg +Places365_test_00270689.jpg +Places365_test_00270693.jpg +Places365_test_00270696.jpg +Places365_test_00270702.jpg +Places365_test_00270712.jpg +Places365_test_00270719.jpg +Places365_test_00270736.jpg +Places365_test_00270741.jpg +Places365_test_00270754.jpg +Places365_test_00270768.jpg +Places365_test_00270776.jpg +Places365_test_00270778.jpg +Places365_test_00270782.jpg +Places365_test_00270787.jpg +Places365_test_00270790.jpg +Places365_test_00270791.jpg +Places365_test_00270799.jpg +Places365_test_00270802.jpg +Places365_test_00270815.jpg +Places365_test_00270820.jpg +Places365_test_00270823.jpg +Places365_test_00270834.jpg +Places365_test_00270835.jpg +Places365_test_00270838.jpg +Places365_test_00270840.jpg +Places365_test_00270850.jpg +Places365_test_00270856.jpg +Places365_test_00270879.jpg +Places365_test_00270888.jpg +Places365_test_00270890.jpg +Places365_test_00270891.jpg +Places365_test_00270908.jpg +Places365_test_00270909.jpg +Places365_test_00270910.jpg +Places365_test_00270916.jpg +Places365_test_00270921.jpg +Places365_test_00270926.jpg +Places365_test_00270935.jpg +Places365_test_00270937.jpg +Places365_test_00270948.jpg +Places365_test_00270958.jpg +Places365_test_00270965.jpg +Places365_test_00270968.jpg +Places365_test_00270971.jpg +Places365_test_00270978.jpg +Places365_test_00270981.jpg +Places365_test_00270995.jpg +Places365_test_00271011.jpg +Places365_test_00271013.jpg +Places365_test_00271026.jpg +Places365_test_00271039.jpg +Places365_test_00271054.jpg +Places365_test_00271070.jpg +Places365_test_00271091.jpg +Places365_test_00271098.jpg +Places365_test_00271128.jpg +Places365_test_00271131.jpg +Places365_test_00271159.jpg +Places365_test_00271188.jpg +Places365_test_00271219.jpg +Places365_test_00271222.jpg +Places365_test_00271223.jpg +Places365_test_00271228.jpg +Places365_test_00271240.jpg +Places365_test_00271249.jpg +Places365_test_00271265.jpg +Places365_test_00271273.jpg +Places365_test_00271274.jpg +Places365_test_00271276.jpg +Places365_test_00271282.jpg +Places365_test_00271288.jpg +Places365_test_00271322.jpg +Places365_test_00271332.jpg +Places365_test_00271352.jpg +Places365_test_00271370.jpg +Places365_test_00271372.jpg +Places365_test_00271388.jpg +Places365_test_00271469.jpg +Places365_test_00271472.jpg +Places365_test_00271474.jpg +Places365_test_00271481.jpg +Places365_test_00271489.jpg +Places365_test_00271496.jpg +Places365_test_00271509.jpg +Places365_test_00271511.jpg +Places365_test_00271522.jpg +Places365_test_00271524.jpg +Places365_test_00271528.jpg +Places365_test_00271539.jpg +Places365_test_00271552.jpg +Places365_test_00271558.jpg +Places365_test_00271565.jpg +Places365_test_00271570.jpg +Places365_test_00271573.jpg +Places365_test_00271576.jpg +Places365_test_00271591.jpg +Places365_test_00271611.jpg +Places365_test_00271622.jpg +Places365_test_00271630.jpg +Places365_test_00271643.jpg +Places365_test_00271655.jpg +Places365_test_00271657.jpg +Places365_test_00271664.jpg 
+Places365_test_00271672.jpg +Places365_test_00271681.jpg +Places365_test_00271715.jpg +Places365_test_00271723.jpg +Places365_test_00271731.jpg +Places365_test_00271747.jpg +Places365_test_00271758.jpg +Places365_test_00271762.jpg +Places365_test_00271784.jpg +Places365_test_00271785.jpg +Places365_test_00271800.jpg +Places365_test_00271805.jpg +Places365_test_00271808.jpg +Places365_test_00271815.jpg +Places365_test_00271823.jpg +Places365_test_00271828.jpg +Places365_test_00271874.jpg +Places365_test_00271889.jpg +Places365_test_00271900.jpg +Places365_test_00271905.jpg +Places365_test_00271941.jpg +Places365_test_00271955.jpg +Places365_test_00271972.jpg +Places365_test_00271985.jpg +Places365_test_00271993.jpg +Places365_test_00272006.jpg +Places365_test_00272019.jpg +Places365_test_00272033.jpg +Places365_test_00272055.jpg +Places365_test_00272059.jpg +Places365_test_00272063.jpg +Places365_test_00272066.jpg +Places365_test_00272088.jpg +Places365_test_00272094.jpg +Places365_test_00272099.jpg +Places365_test_00272104.jpg +Places365_test_00272120.jpg +Places365_test_00272136.jpg +Places365_test_00272151.jpg +Places365_test_00272153.jpg +Places365_test_00272171.jpg +Places365_test_00272177.jpg +Places365_test_00272181.jpg +Places365_test_00272185.jpg +Places365_test_00272188.jpg +Places365_test_00272196.jpg +Places365_test_00272199.jpg +Places365_test_00272231.jpg +Places365_test_00272232.jpg +Places365_test_00272263.jpg +Places365_test_00272264.jpg +Places365_test_00272266.jpg +Places365_test_00272267.jpg +Places365_test_00272280.jpg +Places365_test_00272316.jpg +Places365_test_00272319.jpg +Places365_test_00272325.jpg +Places365_test_00272331.jpg +Places365_test_00272346.jpg +Places365_test_00272353.jpg +Places365_test_00272397.jpg +Places365_test_00272403.jpg +Places365_test_00272440.jpg +Places365_test_00272466.jpg +Places365_test_00272469.jpg +Places365_test_00272471.jpg +Places365_test_00272478.jpg +Places365_test_00272493.jpg +Places365_test_00272495.jpg +Places365_test_00272505.jpg +Places365_test_00272526.jpg +Places365_test_00272528.jpg +Places365_test_00272529.jpg +Places365_test_00272553.jpg +Places365_test_00272579.jpg +Places365_test_00272597.jpg +Places365_test_00272604.jpg +Places365_test_00272620.jpg +Places365_test_00272648.jpg +Places365_test_00272654.jpg +Places365_test_00272678.jpg +Places365_test_00272712.jpg +Places365_test_00272722.jpg +Places365_test_00272724.jpg +Places365_test_00272743.jpg +Places365_test_00272744.jpg +Places365_test_00272753.jpg +Places365_test_00272767.jpg +Places365_test_00272787.jpg +Places365_test_00272797.jpg +Places365_test_00272798.jpg +Places365_test_00272801.jpg +Places365_test_00272807.jpg +Places365_test_00272819.jpg +Places365_test_00272823.jpg +Places365_test_00272828.jpg +Places365_test_00272854.jpg +Places365_test_00272857.jpg +Places365_test_00272861.jpg +Places365_test_00272867.jpg +Places365_test_00272871.jpg +Places365_test_00272877.jpg +Places365_test_00272878.jpg +Places365_test_00272882.jpg +Places365_test_00272883.jpg +Places365_test_00272887.jpg +Places365_test_00272889.jpg +Places365_test_00272899.jpg +Places365_test_00272929.jpg +Places365_test_00272939.jpg +Places365_test_00272943.jpg +Places365_test_00272945.jpg +Places365_test_00272954.jpg +Places365_test_00272970.jpg +Places365_test_00272972.jpg +Places365_test_00272986.jpg +Places365_test_00273046.jpg +Places365_test_00273056.jpg +Places365_test_00273059.jpg +Places365_test_00273090.jpg +Places365_test_00273092.jpg +Places365_test_00273094.jpg 
+Places365_test_00273111.jpg +Places365_test_00273112.jpg +Places365_test_00273126.jpg +Places365_test_00273134.jpg +Places365_test_00273142.jpg +Places365_test_00273154.jpg +Places365_test_00273171.jpg +Places365_test_00273177.jpg +Places365_test_00273209.jpg +Places365_test_00273220.jpg +Places365_test_00273232.jpg +Places365_test_00273268.jpg +Places365_test_00273307.jpg +Places365_test_00273311.jpg +Places365_test_00273320.jpg +Places365_test_00273348.jpg +Places365_test_00273349.jpg +Places365_test_00273354.jpg +Places365_test_00273360.jpg +Places365_test_00273383.jpg +Places365_test_00273401.jpg +Places365_test_00273407.jpg +Places365_test_00273429.jpg +Places365_test_00273434.jpg +Places365_test_00273439.jpg +Places365_test_00273442.jpg +Places365_test_00273445.jpg +Places365_test_00273453.jpg +Places365_test_00273455.jpg +Places365_test_00273476.jpg +Places365_test_00273485.jpg +Places365_test_00273487.jpg +Places365_test_00273497.jpg +Places365_test_00273510.jpg +Places365_test_00273524.jpg +Places365_test_00273531.jpg +Places365_test_00273552.jpg +Places365_test_00273555.jpg +Places365_test_00273604.jpg +Places365_test_00273614.jpg +Places365_test_00273626.jpg +Places365_test_00273642.jpg +Places365_test_00273647.jpg +Places365_test_00273677.jpg +Places365_test_00273679.jpg +Places365_test_00273696.jpg +Places365_test_00273709.jpg +Places365_test_00273718.jpg +Places365_test_00273725.jpg +Places365_test_00273737.jpg +Places365_test_00273739.jpg +Places365_test_00273752.jpg +Places365_test_00273754.jpg +Places365_test_00273757.jpg +Places365_test_00273770.jpg +Places365_test_00273800.jpg +Places365_test_00273802.jpg +Places365_test_00273804.jpg +Places365_test_00273809.jpg +Places365_test_00273820.jpg +Places365_test_00273832.jpg +Places365_test_00273836.jpg +Places365_test_00273855.jpg +Places365_test_00273860.jpg +Places365_test_00273864.jpg +Places365_test_00273865.jpg +Places365_test_00273870.jpg +Places365_test_00273918.jpg +Places365_test_00273927.jpg +Places365_test_00273933.jpg +Places365_test_00273949.jpg +Places365_test_00273980.jpg +Places365_test_00273981.jpg +Places365_test_00273993.jpg +Places365_test_00273994.jpg +Places365_test_00274000.jpg +Places365_test_00274030.jpg +Places365_test_00274031.jpg +Places365_test_00274057.jpg +Places365_test_00274060.jpg +Places365_test_00274074.jpg +Places365_test_00274079.jpg +Places365_test_00274089.jpg +Places365_test_00274092.jpg +Places365_test_00274106.jpg +Places365_test_00274158.jpg +Places365_test_00274165.jpg +Places365_test_00274193.jpg +Places365_test_00274196.jpg +Places365_test_00274206.jpg +Places365_test_00274236.jpg +Places365_test_00274240.jpg +Places365_test_00274248.jpg +Places365_test_00274250.jpg +Places365_test_00274261.jpg +Places365_test_00274275.jpg +Places365_test_00274276.jpg +Places365_test_00274294.jpg +Places365_test_00274341.jpg +Places365_test_00274371.jpg +Places365_test_00274379.jpg +Places365_test_00274385.jpg +Places365_test_00274387.jpg +Places365_test_00274397.jpg +Places365_test_00274427.jpg +Places365_test_00274428.jpg +Places365_test_00274432.jpg +Places365_test_00274436.jpg +Places365_test_00274442.jpg +Places365_test_00274457.jpg +Places365_test_00274474.jpg +Places365_test_00274477.jpg +Places365_test_00274479.jpg +Places365_test_00274483.jpg +Places365_test_00274490.jpg +Places365_test_00274508.jpg +Places365_test_00274511.jpg +Places365_test_00274530.jpg +Places365_test_00274547.jpg +Places365_test_00274550.jpg +Places365_test_00274560.jpg +Places365_test_00274561.jpg 
+Places365_test_00274594.jpg +Places365_test_00274607.jpg +Places365_test_00274615.jpg +Places365_test_00274619.jpg +Places365_test_00274631.jpg +Places365_test_00274637.jpg +Places365_test_00274660.jpg +Places365_test_00274682.jpg +Places365_test_00274683.jpg +Places365_test_00274688.jpg +Places365_test_00274700.jpg +Places365_test_00274712.jpg +Places365_test_00274732.jpg +Places365_test_00274734.jpg +Places365_test_00274740.jpg +Places365_test_00274781.jpg +Places365_test_00274791.jpg +Places365_test_00274814.jpg +Places365_test_00274837.jpg +Places365_test_00274840.jpg +Places365_test_00274846.jpg +Places365_test_00274868.jpg +Places365_test_00274869.jpg +Places365_test_00274870.jpg +Places365_test_00274875.jpg +Places365_test_00274890.jpg +Places365_test_00274899.jpg +Places365_test_00274920.jpg +Places365_test_00274928.jpg +Places365_test_00274942.jpg +Places365_test_00274966.jpg +Places365_test_00275001.jpg +Places365_test_00275002.jpg +Places365_test_00275004.jpg +Places365_test_00275009.jpg +Places365_test_00275012.jpg +Places365_test_00275018.jpg +Places365_test_00275024.jpg +Places365_test_00275034.jpg +Places365_test_00275041.jpg +Places365_test_00275044.jpg +Places365_test_00275070.jpg +Places365_test_00275074.jpg +Places365_test_00275093.jpg +Places365_test_00275097.jpg +Places365_test_00275120.jpg +Places365_test_00275121.jpg +Places365_test_00275124.jpg +Places365_test_00275130.jpg +Places365_test_00275131.jpg +Places365_test_00275140.jpg +Places365_test_00275144.jpg +Places365_test_00275163.jpg +Places365_test_00275165.jpg +Places365_test_00275194.jpg +Places365_test_00275206.jpg +Places365_test_00275219.jpg +Places365_test_00275223.jpg +Places365_test_00275231.jpg +Places365_test_00275232.jpg +Places365_test_00275244.jpg +Places365_test_00275259.jpg +Places365_test_00275287.jpg +Places365_test_00275293.jpg +Places365_test_00275299.jpg +Places365_test_00275329.jpg +Places365_test_00275331.jpg +Places365_test_00275335.jpg +Places365_test_00275338.jpg +Places365_test_00275347.jpg +Places365_test_00275348.jpg +Places365_test_00275354.jpg +Places365_test_00275357.jpg +Places365_test_00275421.jpg +Places365_test_00275437.jpg +Places365_test_00275440.jpg +Places365_test_00275446.jpg +Places365_test_00275450.jpg +Places365_test_00275451.jpg +Places365_test_00275454.jpg +Places365_test_00275480.jpg +Places365_test_00275483.jpg +Places365_test_00275511.jpg +Places365_test_00275540.jpg +Places365_test_00275545.jpg +Places365_test_00275554.jpg +Places365_test_00275564.jpg +Places365_test_00275566.jpg +Places365_test_00275581.jpg +Places365_test_00275619.jpg +Places365_test_00275632.jpg +Places365_test_00275633.jpg +Places365_test_00275635.jpg +Places365_test_00275636.jpg +Places365_test_00275646.jpg +Places365_test_00275661.jpg +Places365_test_00275667.jpg +Places365_test_00275681.jpg +Places365_test_00275682.jpg +Places365_test_00275687.jpg +Places365_test_00275688.jpg +Places365_test_00275695.jpg +Places365_test_00275697.jpg +Places365_test_00275702.jpg +Places365_test_00275720.jpg +Places365_test_00275731.jpg +Places365_test_00275749.jpg +Places365_test_00275756.jpg +Places365_test_00275772.jpg +Places365_test_00275782.jpg +Places365_test_00275783.jpg +Places365_test_00275795.jpg +Places365_test_00275800.jpg +Places365_test_00275843.jpg +Places365_test_00275846.jpg +Places365_test_00275856.jpg +Places365_test_00275859.jpg +Places365_test_00275873.jpg +Places365_test_00275886.jpg +Places365_test_00275899.jpg +Places365_test_00275900.jpg +Places365_test_00275908.jpg 
+Places365_test_00275918.jpg +Places365_test_00275923.jpg +Places365_test_00275933.jpg +Places365_test_00275965.jpg +Places365_test_00275971.jpg +Places365_test_00275980.jpg +Places365_test_00275990.jpg +Places365_test_00276003.jpg +Places365_test_00276006.jpg +Places365_test_00276014.jpg +Places365_test_00276023.jpg +Places365_test_00276051.jpg +Places365_test_00276053.jpg +Places365_test_00276059.jpg +Places365_test_00276060.jpg +Places365_test_00276069.jpg +Places365_test_00276076.jpg +Places365_test_00276087.jpg +Places365_test_00276098.jpg +Places365_test_00276099.jpg +Places365_test_00276106.jpg +Places365_test_00276121.jpg +Places365_test_00276176.jpg +Places365_test_00276185.jpg +Places365_test_00276193.jpg +Places365_test_00276200.jpg +Places365_test_00276216.jpg +Places365_test_00276217.jpg +Places365_test_00276227.jpg +Places365_test_00276237.jpg +Places365_test_00276243.jpg +Places365_test_00276264.jpg +Places365_test_00276267.jpg +Places365_test_00276280.jpg +Places365_test_00276287.jpg +Places365_test_00276296.jpg +Places365_test_00276301.jpg +Places365_test_00276303.jpg +Places365_test_00276337.jpg +Places365_test_00276353.jpg +Places365_test_00276364.jpg +Places365_test_00276374.jpg +Places365_test_00276380.jpg +Places365_test_00276383.jpg +Places365_test_00276384.jpg +Places365_test_00276390.jpg +Places365_test_00276395.jpg +Places365_test_00276396.jpg +Places365_test_00276400.jpg +Places365_test_00276419.jpg +Places365_test_00276422.jpg +Places365_test_00276430.jpg +Places365_test_00276431.jpg +Places365_test_00276439.jpg +Places365_test_00276447.jpg +Places365_test_00276478.jpg +Places365_test_00276482.jpg +Places365_test_00276486.jpg +Places365_test_00276495.jpg +Places365_test_00276499.jpg +Places365_test_00276500.jpg +Places365_test_00276516.jpg +Places365_test_00276524.jpg +Places365_test_00276528.jpg +Places365_test_00276530.jpg +Places365_test_00276545.jpg +Places365_test_00276546.jpg +Places365_test_00276559.jpg +Places365_test_00276560.jpg +Places365_test_00276564.jpg +Places365_test_00276575.jpg +Places365_test_00276583.jpg +Places365_test_00276596.jpg +Places365_test_00276621.jpg +Places365_test_00276633.jpg +Places365_test_00276638.jpg +Places365_test_00276648.jpg +Places365_test_00276649.jpg +Places365_test_00276650.jpg +Places365_test_00276652.jpg +Places365_test_00276658.jpg +Places365_test_00276662.jpg +Places365_test_00276665.jpg +Places365_test_00276667.jpg +Places365_test_00276674.jpg +Places365_test_00276675.jpg +Places365_test_00276697.jpg +Places365_test_00276716.jpg +Places365_test_00276720.jpg +Places365_test_00276721.jpg +Places365_test_00276735.jpg +Places365_test_00276736.jpg +Places365_test_00276748.jpg +Places365_test_00276757.jpg +Places365_test_00276767.jpg +Places365_test_00276775.jpg +Places365_test_00276777.jpg +Places365_test_00276780.jpg +Places365_test_00276792.jpg +Places365_test_00276797.jpg +Places365_test_00276799.jpg +Places365_test_00276812.jpg +Places365_test_00276815.jpg +Places365_test_00276819.jpg +Places365_test_00276826.jpg +Places365_test_00276841.jpg +Places365_test_00276848.jpg +Places365_test_00276885.jpg +Places365_test_00276890.jpg +Places365_test_00276910.jpg +Places365_test_00276924.jpg +Places365_test_00276925.jpg +Places365_test_00276933.jpg +Places365_test_00276939.jpg +Places365_test_00276944.jpg +Places365_test_00276960.jpg +Places365_test_00276971.jpg +Places365_test_00276995.jpg +Places365_test_00277006.jpg +Places365_test_00277008.jpg +Places365_test_00277022.jpg +Places365_test_00277032.jpg 
+Places365_test_00277058.jpg +Places365_test_00277063.jpg +Places365_test_00277065.jpg +Places365_test_00277079.jpg +Places365_test_00277087.jpg +Places365_test_00277100.jpg +Places365_test_00277105.jpg +Places365_test_00277108.jpg +Places365_test_00277111.jpg +Places365_test_00277112.jpg +Places365_test_00277118.jpg +Places365_test_00277125.jpg +Places365_test_00277128.jpg +Places365_test_00277146.jpg +Places365_test_00277175.jpg +Places365_test_00277185.jpg +Places365_test_00277192.jpg +Places365_test_00277195.jpg +Places365_test_00277213.jpg +Places365_test_00277216.jpg +Places365_test_00277218.jpg +Places365_test_00277224.jpg +Places365_test_00277226.jpg +Places365_test_00277227.jpg +Places365_test_00277228.jpg +Places365_test_00277231.jpg +Places365_test_00277238.jpg +Places365_test_00277246.jpg +Places365_test_00277247.jpg +Places365_test_00277254.jpg +Places365_test_00277259.jpg +Places365_test_00277283.jpg +Places365_test_00277291.jpg +Places365_test_00277292.jpg +Places365_test_00277301.jpg +Places365_test_00277306.jpg +Places365_test_00277337.jpg +Places365_test_00277342.jpg +Places365_test_00277344.jpg +Places365_test_00277358.jpg +Places365_test_00277417.jpg +Places365_test_00277466.jpg +Places365_test_00277470.jpg +Places365_test_00277472.jpg +Places365_test_00277473.jpg +Places365_test_00277485.jpg +Places365_test_00277498.jpg +Places365_test_00277518.jpg +Places365_test_00277527.jpg +Places365_test_00277536.jpg +Places365_test_00277545.jpg +Places365_test_00277549.jpg +Places365_test_00277568.jpg +Places365_test_00277578.jpg +Places365_test_00277584.jpg +Places365_test_00277616.jpg +Places365_test_00277635.jpg +Places365_test_00277637.jpg +Places365_test_00277667.jpg +Places365_test_00277676.jpg +Places365_test_00277682.jpg +Places365_test_00277683.jpg +Places365_test_00277713.jpg +Places365_test_00277738.jpg +Places365_test_00277745.jpg +Places365_test_00277754.jpg +Places365_test_00277778.jpg +Places365_test_00277782.jpg +Places365_test_00277792.jpg +Places365_test_00277797.jpg +Places365_test_00277798.jpg +Places365_test_00277805.jpg +Places365_test_00277806.jpg +Places365_test_00277808.jpg +Places365_test_00277809.jpg +Places365_test_00277818.jpg +Places365_test_00277844.jpg +Places365_test_00277850.jpg +Places365_test_00277871.jpg +Places365_test_00277873.jpg +Places365_test_00277889.jpg +Places365_test_00277892.jpg +Places365_test_00277902.jpg +Places365_test_00277903.jpg +Places365_test_00277906.jpg +Places365_test_00277918.jpg +Places365_test_00277929.jpg +Places365_test_00277966.jpg +Places365_test_00277982.jpg +Places365_test_00277984.jpg +Places365_test_00277991.jpg +Places365_test_00278002.jpg +Places365_test_00278010.jpg +Places365_test_00278029.jpg +Places365_test_00278044.jpg +Places365_test_00278070.jpg +Places365_test_00278091.jpg +Places365_test_00278113.jpg +Places365_test_00278117.jpg +Places365_test_00278121.jpg +Places365_test_00278134.jpg +Places365_test_00278144.jpg +Places365_test_00278151.jpg +Places365_test_00278153.jpg +Places365_test_00278161.jpg +Places365_test_00278172.jpg +Places365_test_00278187.jpg +Places365_test_00278204.jpg +Places365_test_00278208.jpg +Places365_test_00278211.jpg +Places365_test_00278217.jpg +Places365_test_00278218.jpg +Places365_test_00278220.jpg +Places365_test_00278226.jpg +Places365_test_00278228.jpg +Places365_test_00278235.jpg +Places365_test_00278260.jpg +Places365_test_00278264.jpg +Places365_test_00278276.jpg +Places365_test_00278280.jpg +Places365_test_00278281.jpg +Places365_test_00278317.jpg 
+Places365_test_00278325.jpg +Places365_test_00278343.jpg +Places365_test_00278356.jpg +Places365_test_00278366.jpg +Places365_test_00278370.jpg +Places365_test_00278389.jpg +Places365_test_00278391.jpg +Places365_test_00278406.jpg +Places365_test_00278412.jpg +Places365_test_00278413.jpg +Places365_test_00278431.jpg +Places365_test_00278432.jpg +Places365_test_00278437.jpg +Places365_test_00278440.jpg +Places365_test_00278456.jpg +Places365_test_00278472.jpg +Places365_test_00278483.jpg +Places365_test_00278490.jpg +Places365_test_00278500.jpg +Places365_test_00278509.jpg +Places365_test_00278529.jpg +Places365_test_00278535.jpg +Places365_test_00278562.jpg +Places365_test_00278566.jpg +Places365_test_00278579.jpg +Places365_test_00278581.jpg +Places365_test_00278585.jpg +Places365_test_00278596.jpg +Places365_test_00278600.jpg +Places365_test_00278603.jpg +Places365_test_00278614.jpg +Places365_test_00278625.jpg +Places365_test_00278633.jpg +Places365_test_00278638.jpg +Places365_test_00278646.jpg +Places365_test_00278654.jpg +Places365_test_00278667.jpg +Places365_test_00278673.jpg +Places365_test_00278683.jpg +Places365_test_00278702.jpg +Places365_test_00278708.jpg +Places365_test_00278712.jpg +Places365_test_00278731.jpg +Places365_test_00278740.jpg +Places365_test_00278767.jpg +Places365_test_00278789.jpg +Places365_test_00278797.jpg +Places365_test_00278816.jpg +Places365_test_00278817.jpg +Places365_test_00278829.jpg +Places365_test_00278836.jpg +Places365_test_00278842.jpg +Places365_test_00278850.jpg +Places365_test_00278854.jpg +Places365_test_00278856.jpg +Places365_test_00278858.jpg +Places365_test_00278862.jpg +Places365_test_00278875.jpg +Places365_test_00278879.jpg +Places365_test_00278890.jpg +Places365_test_00278892.jpg +Places365_test_00278909.jpg +Places365_test_00278962.jpg +Places365_test_00278964.jpg +Places365_test_00278966.jpg +Places365_test_00278987.jpg +Places365_test_00279012.jpg +Places365_test_00279018.jpg +Places365_test_00279029.jpg +Places365_test_00279038.jpg +Places365_test_00279045.jpg +Places365_test_00279049.jpg +Places365_test_00279057.jpg +Places365_test_00279071.jpg +Places365_test_00279080.jpg +Places365_test_00279086.jpg +Places365_test_00279090.jpg +Places365_test_00279091.jpg +Places365_test_00279092.jpg +Places365_test_00279094.jpg +Places365_test_00279099.jpg +Places365_test_00279104.jpg +Places365_test_00279114.jpg +Places365_test_00279122.jpg +Places365_test_00279124.jpg +Places365_test_00279128.jpg +Places365_test_00279129.jpg +Places365_test_00279133.jpg +Places365_test_00279152.jpg +Places365_test_00279154.jpg +Places365_test_00279160.jpg +Places365_test_00279161.jpg +Places365_test_00279164.jpg +Places365_test_00279168.jpg +Places365_test_00279170.jpg +Places365_test_00279178.jpg +Places365_test_00279180.jpg +Places365_test_00279188.jpg +Places365_test_00279191.jpg +Places365_test_00279196.jpg +Places365_test_00279199.jpg +Places365_test_00279219.jpg +Places365_test_00279220.jpg +Places365_test_00279222.jpg +Places365_test_00279225.jpg +Places365_test_00279233.jpg +Places365_test_00279257.jpg +Places365_test_00279261.jpg +Places365_test_00279264.jpg +Places365_test_00279267.jpg +Places365_test_00279287.jpg +Places365_test_00279292.jpg +Places365_test_00279307.jpg +Places365_test_00279308.jpg +Places365_test_00279318.jpg +Places365_test_00279334.jpg +Places365_test_00279340.jpg +Places365_test_00279343.jpg +Places365_test_00279362.jpg +Places365_test_00279389.jpg +Places365_test_00279392.jpg +Places365_test_00279395.jpg 
+Places365_test_00279405.jpg +Places365_test_00279407.jpg +Places365_test_00279414.jpg +Places365_test_00279417.jpg +Places365_test_00279429.jpg +Places365_test_00279433.jpg +Places365_test_00279437.jpg +Places365_test_00279458.jpg +Places365_test_00279459.jpg +Places365_test_00279460.jpg +Places365_test_00279472.jpg +Places365_test_00279490.jpg +Places365_test_00279522.jpg +Places365_test_00279527.jpg +Places365_test_00279558.jpg +Places365_test_00279570.jpg +Places365_test_00279573.jpg +Places365_test_00279600.jpg +Places365_test_00279608.jpg +Places365_test_00279628.jpg +Places365_test_00279634.jpg +Places365_test_00279639.jpg +Places365_test_00279669.jpg +Places365_test_00279675.jpg +Places365_test_00279689.jpg +Places365_test_00279696.jpg +Places365_test_00279720.jpg +Places365_test_00279729.jpg +Places365_test_00279731.jpg +Places365_test_00279735.jpg +Places365_test_00279738.jpg +Places365_test_00279761.jpg +Places365_test_00279762.jpg +Places365_test_00279766.jpg +Places365_test_00279777.jpg +Places365_test_00279783.jpg +Places365_test_00279787.jpg +Places365_test_00279788.jpg +Places365_test_00279812.jpg +Places365_test_00279820.jpg +Places365_test_00279830.jpg +Places365_test_00279848.jpg +Places365_test_00279851.jpg +Places365_test_00279859.jpg +Places365_test_00279877.jpg +Places365_test_00279878.jpg +Places365_test_00279888.jpg +Places365_test_00279889.jpg +Places365_test_00279890.jpg +Places365_test_00279895.jpg +Places365_test_00279905.jpg +Places365_test_00279909.jpg +Places365_test_00279918.jpg +Places365_test_00279930.jpg +Places365_test_00279939.jpg +Places365_test_00279945.jpg +Places365_test_00279991.jpg +Places365_test_00279995.jpg +Places365_test_00280006.jpg +Places365_test_00280022.jpg +Places365_test_00280023.jpg +Places365_test_00280030.jpg +Places365_test_00280045.jpg +Places365_test_00280056.jpg +Places365_test_00280068.jpg +Places365_test_00280087.jpg +Places365_test_00280102.jpg +Places365_test_00280116.jpg +Places365_test_00280118.jpg +Places365_test_00280123.jpg +Places365_test_00280131.jpg +Places365_test_00280134.jpg +Places365_test_00280154.jpg +Places365_test_00280155.jpg +Places365_test_00280176.jpg +Places365_test_00280208.jpg +Places365_test_00280217.jpg +Places365_test_00280219.jpg +Places365_test_00280226.jpg +Places365_test_00280238.jpg +Places365_test_00280241.jpg +Places365_test_00280253.jpg +Places365_test_00280264.jpg +Places365_test_00280284.jpg +Places365_test_00280300.jpg +Places365_test_00280331.jpg +Places365_test_00280339.jpg +Places365_test_00280356.jpg +Places365_test_00280362.jpg +Places365_test_00280384.jpg +Places365_test_00280401.jpg +Places365_test_00280409.jpg +Places365_test_00280441.jpg +Places365_test_00280443.jpg +Places365_test_00280451.jpg +Places365_test_00280460.jpg +Places365_test_00280462.jpg +Places365_test_00280472.jpg +Places365_test_00280481.jpg +Places365_test_00280499.jpg +Places365_test_00280506.jpg +Places365_test_00280508.jpg +Places365_test_00280528.jpg +Places365_test_00280558.jpg +Places365_test_00280562.jpg +Places365_test_00280567.jpg +Places365_test_00280584.jpg +Places365_test_00280586.jpg +Places365_test_00280600.jpg +Places365_test_00280616.jpg +Places365_test_00280627.jpg +Places365_test_00280637.jpg +Places365_test_00280638.jpg +Places365_test_00280644.jpg +Places365_test_00280663.jpg +Places365_test_00280683.jpg +Places365_test_00280684.jpg +Places365_test_00280687.jpg +Places365_test_00280703.jpg +Places365_test_00280704.jpg +Places365_test_00280707.jpg +Places365_test_00280708.jpg 
+Places365_test_00280745.jpg +Places365_test_00280752.jpg +Places365_test_00280754.jpg +Places365_test_00280757.jpg +Places365_test_00280758.jpg +Places365_test_00280763.jpg +Places365_test_00280764.jpg +Places365_test_00280769.jpg +Places365_test_00280770.jpg +Places365_test_00280812.jpg +Places365_test_00280813.jpg +Places365_test_00280815.jpg +Places365_test_00280819.jpg +Places365_test_00280859.jpg +Places365_test_00280862.jpg +Places365_test_00280918.jpg +Places365_test_00280922.jpg +Places365_test_00280931.jpg +Places365_test_00280941.jpg +Places365_test_00280944.jpg +Places365_test_00280945.jpg +Places365_test_00280967.jpg +Places365_test_00280979.jpg +Places365_test_00281004.jpg +Places365_test_00281057.jpg +Places365_test_00281061.jpg +Places365_test_00281070.jpg +Places365_test_00281079.jpg +Places365_test_00281105.jpg +Places365_test_00281116.jpg +Places365_test_00281138.jpg +Places365_test_00281139.jpg +Places365_test_00281155.jpg +Places365_test_00281162.jpg +Places365_test_00281182.jpg +Places365_test_00281200.jpg +Places365_test_00281212.jpg +Places365_test_00281224.jpg +Places365_test_00281227.jpg +Places365_test_00281233.jpg +Places365_test_00281244.jpg +Places365_test_00281261.jpg +Places365_test_00281265.jpg +Places365_test_00281269.jpg +Places365_test_00281271.jpg +Places365_test_00281313.jpg +Places365_test_00281314.jpg +Places365_test_00281324.jpg +Places365_test_00281326.jpg +Places365_test_00281328.jpg +Places365_test_00281355.jpg +Places365_test_00281357.jpg +Places365_test_00281368.jpg +Places365_test_00281377.jpg +Places365_test_00281384.jpg +Places365_test_00281418.jpg +Places365_test_00281423.jpg +Places365_test_00281433.jpg +Places365_test_00281441.jpg +Places365_test_00281446.jpg +Places365_test_00281450.jpg +Places365_test_00281466.jpg +Places365_test_00281474.jpg +Places365_test_00281479.jpg +Places365_test_00281493.jpg +Places365_test_00281516.jpg +Places365_test_00281526.jpg +Places365_test_00281558.jpg +Places365_test_00281567.jpg +Places365_test_00281568.jpg +Places365_test_00281570.jpg +Places365_test_00281594.jpg +Places365_test_00281604.jpg +Places365_test_00281606.jpg +Places365_test_00281608.jpg +Places365_test_00281633.jpg +Places365_test_00281639.jpg +Places365_test_00281647.jpg +Places365_test_00281648.jpg +Places365_test_00281657.jpg +Places365_test_00281665.jpg +Places365_test_00281669.jpg +Places365_test_00281688.jpg +Places365_test_00281701.jpg +Places365_test_00281717.jpg +Places365_test_00281729.jpg +Places365_test_00281741.jpg +Places365_test_00281748.jpg +Places365_test_00281749.jpg +Places365_test_00281777.jpg +Places365_test_00281797.jpg +Places365_test_00281819.jpg +Places365_test_00281827.jpg +Places365_test_00281831.jpg +Places365_test_00281849.jpg +Places365_test_00281852.jpg +Places365_test_00281875.jpg +Places365_test_00281878.jpg +Places365_test_00281885.jpg +Places365_test_00281894.jpg +Places365_test_00281917.jpg +Places365_test_00281922.jpg +Places365_test_00281928.jpg +Places365_test_00281934.jpg +Places365_test_00281953.jpg +Places365_test_00281969.jpg +Places365_test_00281987.jpg +Places365_test_00282003.jpg +Places365_test_00282007.jpg +Places365_test_00282012.jpg +Places365_test_00282014.jpg +Places365_test_00282021.jpg +Places365_test_00282030.jpg +Places365_test_00282032.jpg +Places365_test_00282045.jpg +Places365_test_00282049.jpg +Places365_test_00282065.jpg +Places365_test_00282082.jpg +Places365_test_00282088.jpg +Places365_test_00282089.jpg +Places365_test_00282091.jpg +Places365_test_00282093.jpg 
+Places365_test_00282103.jpg +Places365_test_00282105.jpg +Places365_test_00282113.jpg +Places365_test_00282123.jpg +Places365_test_00282130.jpg +Places365_test_00282149.jpg +Places365_test_00282195.jpg +Places365_test_00282199.jpg +Places365_test_00282214.jpg +Places365_test_00282223.jpg +Places365_test_00282234.jpg +Places365_test_00282250.jpg +Places365_test_00282252.jpg +Places365_test_00282257.jpg +Places365_test_00282266.jpg +Places365_test_00282269.jpg +Places365_test_00282287.jpg +Places365_test_00282300.jpg +Places365_test_00282303.jpg +Places365_test_00282304.jpg +Places365_test_00282308.jpg +Places365_test_00282326.jpg +Places365_test_00282331.jpg +Places365_test_00282341.jpg +Places365_test_00282360.jpg +Places365_test_00282371.jpg +Places365_test_00282374.jpg +Places365_test_00282375.jpg +Places365_test_00282376.jpg +Places365_test_00282414.jpg +Places365_test_00282483.jpg +Places365_test_00282492.jpg +Places365_test_00282494.jpg +Places365_test_00282503.jpg +Places365_test_00282552.jpg +Places365_test_00282563.jpg +Places365_test_00282564.jpg +Places365_test_00282577.jpg +Places365_test_00282588.jpg +Places365_test_00282591.jpg +Places365_test_00282624.jpg +Places365_test_00282626.jpg +Places365_test_00282627.jpg +Places365_test_00282637.jpg +Places365_test_00282648.jpg +Places365_test_00282660.jpg +Places365_test_00282662.jpg +Places365_test_00282668.jpg +Places365_test_00282673.jpg +Places365_test_00282676.jpg +Places365_test_00282686.jpg +Places365_test_00282693.jpg +Places365_test_00282712.jpg +Places365_test_00282755.jpg +Places365_test_00282758.jpg +Places365_test_00282775.jpg +Places365_test_00282777.jpg +Places365_test_00282783.jpg +Places365_test_00282795.jpg +Places365_test_00282818.jpg +Places365_test_00282822.jpg +Places365_test_00282830.jpg +Places365_test_00282831.jpg +Places365_test_00282848.jpg +Places365_test_00282862.jpg +Places365_test_00282864.jpg +Places365_test_00282865.jpg +Places365_test_00282867.jpg +Places365_test_00282879.jpg +Places365_test_00282886.jpg +Places365_test_00282892.jpg +Places365_test_00282893.jpg +Places365_test_00282897.jpg +Places365_test_00282901.jpg +Places365_test_00282905.jpg +Places365_test_00282916.jpg +Places365_test_00282924.jpg +Places365_test_00282933.jpg +Places365_test_00282940.jpg +Places365_test_00282942.jpg +Places365_test_00282946.jpg +Places365_test_00282947.jpg +Places365_test_00282972.jpg +Places365_test_00282980.jpg +Places365_test_00282986.jpg +Places365_test_00282992.jpg +Places365_test_00282994.jpg +Places365_test_00283018.jpg +Places365_test_00283020.jpg +Places365_test_00283040.jpg +Places365_test_00283058.jpg +Places365_test_00283073.jpg +Places365_test_00283085.jpg +Places365_test_00283087.jpg +Places365_test_00283117.jpg +Places365_test_00283124.jpg +Places365_test_00283161.jpg +Places365_test_00283189.jpg +Places365_test_00283195.jpg +Places365_test_00283204.jpg +Places365_test_00283245.jpg +Places365_test_00283249.jpg +Places365_test_00283261.jpg +Places365_test_00283264.jpg +Places365_test_00283279.jpg +Places365_test_00283318.jpg +Places365_test_00283319.jpg +Places365_test_00283334.jpg +Places365_test_00283339.jpg +Places365_test_00283343.jpg +Places365_test_00283352.jpg +Places365_test_00283353.jpg +Places365_test_00283354.jpg +Places365_test_00283423.jpg +Places365_test_00283428.jpg +Places365_test_00283431.jpg +Places365_test_00283435.jpg +Places365_test_00283447.jpg +Places365_test_00283475.jpg +Places365_test_00283484.jpg +Places365_test_00283488.jpg +Places365_test_00283523.jpg 
+Places365_test_00283549.jpg +Places365_test_00283558.jpg +Places365_test_00283573.jpg +Places365_test_00283576.jpg +Places365_test_00283592.jpg +Places365_test_00283595.jpg +Places365_test_00283603.jpg +Places365_test_00283606.jpg +Places365_test_00283627.jpg +Places365_test_00283629.jpg +Places365_test_00283640.jpg +Places365_test_00283670.jpg +Places365_test_00283671.jpg +Places365_test_00283679.jpg +Places365_test_00283691.jpg +Places365_test_00283730.jpg +Places365_test_00283733.jpg +Places365_test_00283747.jpg +Places365_test_00283754.jpg +Places365_test_00283796.jpg +Places365_test_00283804.jpg +Places365_test_00283829.jpg +Places365_test_00283864.jpg +Places365_test_00283867.jpg +Places365_test_00283876.jpg +Places365_test_00283891.jpg +Places365_test_00283932.jpg +Places365_test_00283946.jpg +Places365_test_00283949.jpg +Places365_test_00283950.jpg +Places365_test_00283961.jpg +Places365_test_00283978.jpg +Places365_test_00283998.jpg +Places365_test_00284020.jpg +Places365_test_00284027.jpg +Places365_test_00284039.jpg +Places365_test_00284048.jpg +Places365_test_00284059.jpg +Places365_test_00284060.jpg +Places365_test_00284066.jpg +Places365_test_00284077.jpg +Places365_test_00284086.jpg +Places365_test_00284113.jpg +Places365_test_00284115.jpg +Places365_test_00284131.jpg +Places365_test_00284136.jpg +Places365_test_00284147.jpg +Places365_test_00284161.jpg +Places365_test_00284170.jpg +Places365_test_00284172.jpg +Places365_test_00284195.jpg +Places365_test_00284210.jpg +Places365_test_00284244.jpg +Places365_test_00284251.jpg +Places365_test_00284274.jpg +Places365_test_00284283.jpg +Places365_test_00284292.jpg +Places365_test_00284323.jpg +Places365_test_00284330.jpg +Places365_test_00284338.jpg +Places365_test_00284340.jpg +Places365_test_00284341.jpg +Places365_test_00284346.jpg +Places365_test_00284360.jpg +Places365_test_00284371.jpg +Places365_test_00284374.jpg +Places365_test_00284377.jpg +Places365_test_00284387.jpg +Places365_test_00284394.jpg +Places365_test_00284403.jpg +Places365_test_00284405.jpg +Places365_test_00284417.jpg +Places365_test_00284418.jpg +Places365_test_00284427.jpg +Places365_test_00284438.jpg +Places365_test_00284440.jpg +Places365_test_00284444.jpg +Places365_test_00284445.jpg +Places365_test_00284453.jpg +Places365_test_00284455.jpg +Places365_test_00284457.jpg +Places365_test_00284459.jpg +Places365_test_00284462.jpg +Places365_test_00284469.jpg +Places365_test_00284499.jpg +Places365_test_00284503.jpg +Places365_test_00284519.jpg +Places365_test_00284531.jpg +Places365_test_00284534.jpg +Places365_test_00284554.jpg +Places365_test_00284563.jpg +Places365_test_00284565.jpg +Places365_test_00284578.jpg +Places365_test_00284583.jpg +Places365_test_00284587.jpg +Places365_test_00284599.jpg +Places365_test_00284600.jpg +Places365_test_00284614.jpg +Places365_test_00284624.jpg +Places365_test_00284633.jpg +Places365_test_00284640.jpg +Places365_test_00284654.jpg +Places365_test_00284655.jpg +Places365_test_00284668.jpg +Places365_test_00284682.jpg +Places365_test_00284711.jpg +Places365_test_00284720.jpg +Places365_test_00284725.jpg +Places365_test_00284726.jpg +Places365_test_00284753.jpg +Places365_test_00284767.jpg +Places365_test_00284808.jpg +Places365_test_00284833.jpg +Places365_test_00284858.jpg +Places365_test_00284881.jpg +Places365_test_00284884.jpg +Places365_test_00284889.jpg +Places365_test_00284913.jpg +Places365_test_00284914.jpg +Places365_test_00284925.jpg +Places365_test_00284939.jpg +Places365_test_00284942.jpg 
+Places365_test_00284948.jpg +Places365_test_00284959.jpg +Places365_test_00284964.jpg +Places365_test_00284966.jpg +Places365_test_00284986.jpg +Places365_test_00284991.jpg +Places365_test_00285009.jpg +Places365_test_00285014.jpg +Places365_test_00285030.jpg +Places365_test_00285052.jpg +Places365_test_00285059.jpg +Places365_test_00285076.jpg +Places365_test_00285078.jpg +Places365_test_00285084.jpg +Places365_test_00285088.jpg +Places365_test_00285089.jpg +Places365_test_00285102.jpg +Places365_test_00285110.jpg +Places365_test_00285116.jpg +Places365_test_00285121.jpg +Places365_test_00285136.jpg +Places365_test_00285163.jpg +Places365_test_00285198.jpg +Places365_test_00285205.jpg +Places365_test_00285220.jpg +Places365_test_00285230.jpg +Places365_test_00285236.jpg +Places365_test_00285237.jpg +Places365_test_00285244.jpg +Places365_test_00285251.jpg +Places365_test_00285252.jpg +Places365_test_00285274.jpg +Places365_test_00285275.jpg +Places365_test_00285281.jpg +Places365_test_00285304.jpg +Places365_test_00285305.jpg +Places365_test_00285307.jpg +Places365_test_00285314.jpg +Places365_test_00285330.jpg +Places365_test_00285332.jpg +Places365_test_00285333.jpg +Places365_test_00285340.jpg +Places365_test_00285359.jpg +Places365_test_00285360.jpg +Places365_test_00285371.jpg +Places365_test_00285372.jpg +Places365_test_00285373.jpg +Places365_test_00285392.jpg +Places365_test_00285441.jpg +Places365_test_00285449.jpg +Places365_test_00285452.jpg +Places365_test_00285456.jpg +Places365_test_00285466.jpg +Places365_test_00285473.jpg +Places365_test_00285475.jpg +Places365_test_00285477.jpg +Places365_test_00285523.jpg +Places365_test_00285535.jpg +Places365_test_00285542.jpg +Places365_test_00285600.jpg +Places365_test_00285613.jpg +Places365_test_00285632.jpg +Places365_test_00285638.jpg +Places365_test_00285640.jpg +Places365_test_00285645.jpg +Places365_test_00285646.jpg +Places365_test_00285675.jpg +Places365_test_00285698.jpg +Places365_test_00285703.jpg +Places365_test_00285712.jpg +Places365_test_00285720.jpg +Places365_test_00285727.jpg +Places365_test_00285732.jpg +Places365_test_00285744.jpg +Places365_test_00285747.jpg +Places365_test_00285748.jpg +Places365_test_00285767.jpg +Places365_test_00285770.jpg +Places365_test_00285800.jpg +Places365_test_00285817.jpg +Places365_test_00285821.jpg +Places365_test_00285835.jpg +Places365_test_00285847.jpg +Places365_test_00285884.jpg +Places365_test_00285891.jpg +Places365_test_00285904.jpg +Places365_test_00285908.jpg +Places365_test_00285910.jpg +Places365_test_00285911.jpg +Places365_test_00285928.jpg +Places365_test_00285935.jpg +Places365_test_00285937.jpg +Places365_test_00285946.jpg +Places365_test_00285972.jpg +Places365_test_00285991.jpg +Places365_test_00285998.jpg +Places365_test_00286009.jpg +Places365_test_00286013.jpg +Places365_test_00286015.jpg +Places365_test_00286026.jpg +Places365_test_00286030.jpg +Places365_test_00286032.jpg +Places365_test_00286043.jpg +Places365_test_00286051.jpg +Places365_test_00286065.jpg +Places365_test_00286086.jpg +Places365_test_00286089.jpg +Places365_test_00286113.jpg +Places365_test_00286115.jpg +Places365_test_00286119.jpg +Places365_test_00286125.jpg +Places365_test_00286131.jpg +Places365_test_00286148.jpg +Places365_test_00286149.jpg +Places365_test_00286153.jpg +Places365_test_00286155.jpg +Places365_test_00286158.jpg +Places365_test_00286159.jpg +Places365_test_00286197.jpg +Places365_test_00286205.jpg +Places365_test_00286214.jpg +Places365_test_00286220.jpg 
+Places365_test_00286221.jpg +Places365_test_00286245.jpg +Places365_test_00286263.jpg +Places365_test_00286268.jpg +Places365_test_00286279.jpg +Places365_test_00286309.jpg +Places365_test_00286310.jpg +Places365_test_00286320.jpg +Places365_test_00286348.jpg +Places365_test_00286362.jpg +Places365_test_00286363.jpg +Places365_test_00286365.jpg +Places365_test_00286373.jpg +Places365_test_00286404.jpg +Places365_test_00286414.jpg +Places365_test_00286426.jpg +Places365_test_00286427.jpg +Places365_test_00286435.jpg +Places365_test_00286437.jpg +Places365_test_00286439.jpg +Places365_test_00286441.jpg +Places365_test_00286448.jpg +Places365_test_00286453.jpg +Places365_test_00286456.jpg +Places365_test_00286465.jpg +Places365_test_00286477.jpg +Places365_test_00286485.jpg +Places365_test_00286497.jpg +Places365_test_00286499.jpg +Places365_test_00286503.jpg +Places365_test_00286506.jpg +Places365_test_00286514.jpg +Places365_test_00286526.jpg +Places365_test_00286527.jpg +Places365_test_00286558.jpg +Places365_test_00286565.jpg +Places365_test_00286573.jpg +Places365_test_00286574.jpg +Places365_test_00286586.jpg +Places365_test_00286592.jpg +Places365_test_00286593.jpg +Places365_test_00286594.jpg +Places365_test_00286620.jpg +Places365_test_00286642.jpg +Places365_test_00286658.jpg +Places365_test_00286662.jpg +Places365_test_00286663.jpg +Places365_test_00286665.jpg +Places365_test_00286670.jpg +Places365_test_00286685.jpg +Places365_test_00286687.jpg +Places365_test_00286700.jpg +Places365_test_00286711.jpg +Places365_test_00286728.jpg +Places365_test_00286729.jpg +Places365_test_00286744.jpg +Places365_test_00286749.jpg +Places365_test_00286769.jpg +Places365_test_00286773.jpg +Places365_test_00286774.jpg +Places365_test_00286782.jpg +Places365_test_00286794.jpg +Places365_test_00286822.jpg +Places365_test_00286831.jpg +Places365_test_00286835.jpg +Places365_test_00286839.jpg +Places365_test_00286843.jpg +Places365_test_00286845.jpg +Places365_test_00286850.jpg +Places365_test_00286862.jpg +Places365_test_00286866.jpg +Places365_test_00286905.jpg +Places365_test_00286908.jpg +Places365_test_00286909.jpg +Places365_test_00286919.jpg +Places365_test_00286924.jpg +Places365_test_00286934.jpg +Places365_test_00286971.jpg +Places365_test_00286979.jpg +Places365_test_00286995.jpg +Places365_test_00286999.jpg +Places365_test_00287010.jpg +Places365_test_00287015.jpg +Places365_test_00287017.jpg +Places365_test_00287031.jpg +Places365_test_00287035.jpg +Places365_test_00287044.jpg +Places365_test_00287049.jpg +Places365_test_00287061.jpg +Places365_test_00287073.jpg +Places365_test_00287094.jpg +Places365_test_00287110.jpg +Places365_test_00287112.jpg +Places365_test_00287136.jpg +Places365_test_00287144.jpg +Places365_test_00287156.jpg +Places365_test_00287166.jpg +Places365_test_00287168.jpg +Places365_test_00287187.jpg +Places365_test_00287198.jpg +Places365_test_00287207.jpg +Places365_test_00287212.jpg +Places365_test_00287215.jpg +Places365_test_00287235.jpg +Places365_test_00287237.jpg +Places365_test_00287255.jpg +Places365_test_00287258.jpg +Places365_test_00287267.jpg +Places365_test_00287283.jpg +Places365_test_00287287.jpg +Places365_test_00287327.jpg +Places365_test_00287331.jpg +Places365_test_00287333.jpg +Places365_test_00287346.jpg +Places365_test_00287351.jpg +Places365_test_00287354.jpg +Places365_test_00287358.jpg +Places365_test_00287361.jpg +Places365_test_00287370.jpg +Places365_test_00287378.jpg +Places365_test_00287384.jpg +Places365_test_00287389.jpg 
+Places365_test_00287394.jpg +Places365_test_00287398.jpg +Places365_test_00287402.jpg +Places365_test_00287415.jpg +Places365_test_00287418.jpg +Places365_test_00287423.jpg +Places365_test_00287437.jpg +Places365_test_00287441.jpg +Places365_test_00287465.jpg +Places365_test_00287467.jpg +Places365_test_00287479.jpg +Places365_test_00287503.jpg +Places365_test_00287505.jpg +Places365_test_00287506.jpg +Places365_test_00287508.jpg +Places365_test_00287513.jpg +Places365_test_00287558.jpg +Places365_test_00287560.jpg +Places365_test_00287608.jpg +Places365_test_00287615.jpg +Places365_test_00287616.jpg +Places365_test_00287639.jpg +Places365_test_00287644.jpg +Places365_test_00287646.jpg +Places365_test_00287696.jpg +Places365_test_00287698.jpg +Places365_test_00287708.jpg +Places365_test_00287709.jpg +Places365_test_00287713.jpg +Places365_test_00287727.jpg +Places365_test_00287730.jpg +Places365_test_00287739.jpg +Places365_test_00287741.jpg +Places365_test_00287757.jpg +Places365_test_00287759.jpg +Places365_test_00287766.jpg +Places365_test_00287769.jpg +Places365_test_00287776.jpg +Places365_test_00287795.jpg +Places365_test_00287797.jpg +Places365_test_00287807.jpg +Places365_test_00287825.jpg +Places365_test_00287830.jpg +Places365_test_00287836.jpg +Places365_test_00287858.jpg +Places365_test_00287866.jpg +Places365_test_00287874.jpg +Places365_test_00287879.jpg +Places365_test_00287881.jpg +Places365_test_00287894.jpg +Places365_test_00287900.jpg +Places365_test_00287909.jpg +Places365_test_00287915.jpg +Places365_test_00287936.jpg +Places365_test_00287939.jpg +Places365_test_00287949.jpg +Places365_test_00287964.jpg +Places365_test_00287965.jpg +Places365_test_00287969.jpg +Places365_test_00287977.jpg +Places365_test_00287987.jpg +Places365_test_00288007.jpg +Places365_test_00288012.jpg +Places365_test_00288016.jpg +Places365_test_00288033.jpg +Places365_test_00288051.jpg +Places365_test_00288053.jpg +Places365_test_00288058.jpg +Places365_test_00288086.jpg +Places365_test_00288089.jpg +Places365_test_00288101.jpg +Places365_test_00288103.jpg +Places365_test_00288117.jpg +Places365_test_00288135.jpg +Places365_test_00288139.jpg +Places365_test_00288144.jpg +Places365_test_00288148.jpg +Places365_test_00288152.jpg +Places365_test_00288161.jpg +Places365_test_00288180.jpg +Places365_test_00288185.jpg +Places365_test_00288187.jpg +Places365_test_00288197.jpg +Places365_test_00288198.jpg +Places365_test_00288199.jpg +Places365_test_00288200.jpg +Places365_test_00288217.jpg +Places365_test_00288225.jpg +Places365_test_00288246.jpg +Places365_test_00288249.jpg +Places365_test_00288260.jpg +Places365_test_00288268.jpg +Places365_test_00288283.jpg +Places365_test_00288286.jpg +Places365_test_00288303.jpg +Places365_test_00288305.jpg +Places365_test_00288319.jpg +Places365_test_00288327.jpg +Places365_test_00288328.jpg +Places365_test_00288329.jpg +Places365_test_00288343.jpg +Places365_test_00288351.jpg +Places365_test_00288360.jpg +Places365_test_00288369.jpg +Places365_test_00288414.jpg +Places365_test_00288417.jpg +Places365_test_00288436.jpg +Places365_test_00288443.jpg +Places365_test_00288452.jpg +Places365_test_00288461.jpg +Places365_test_00288477.jpg +Places365_test_00288516.jpg +Places365_test_00288529.jpg +Places365_test_00288530.jpg +Places365_test_00288536.jpg +Places365_test_00288545.jpg +Places365_test_00288549.jpg +Places365_test_00288557.jpg +Places365_test_00288559.jpg +Places365_test_00288564.jpg +Places365_test_00288568.jpg +Places365_test_00288575.jpg 
+Places365_test_00288601.jpg +Places365_test_00288612.jpg +Places365_test_00288617.jpg +Places365_test_00288620.jpg +Places365_test_00288629.jpg +Places365_test_00288630.jpg +Places365_test_00288652.jpg +Places365_test_00288655.jpg +Places365_test_00288665.jpg +Places365_test_00288666.jpg +Places365_test_00288671.jpg +Places365_test_00288674.jpg +Places365_test_00288697.jpg +Places365_test_00288701.jpg +Places365_test_00288707.jpg +Places365_test_00288713.jpg +Places365_test_00288715.jpg +Places365_test_00288731.jpg +Places365_test_00288734.jpg +Places365_test_00288756.jpg +Places365_test_00288766.jpg +Places365_test_00288778.jpg +Places365_test_00288780.jpg +Places365_test_00288784.jpg +Places365_test_00288794.jpg +Places365_test_00288797.jpg +Places365_test_00288798.jpg +Places365_test_00288814.jpg +Places365_test_00288844.jpg +Places365_test_00288849.jpg +Places365_test_00288859.jpg +Places365_test_00288865.jpg +Places365_test_00288869.jpg +Places365_test_00288893.jpg +Places365_test_00288902.jpg +Places365_test_00288904.jpg +Places365_test_00288912.jpg +Places365_test_00288915.jpg +Places365_test_00288924.jpg +Places365_test_00288945.jpg +Places365_test_00288946.jpg +Places365_test_00288962.jpg +Places365_test_00288965.jpg +Places365_test_00288979.jpg +Places365_test_00289011.jpg +Places365_test_00289015.jpg +Places365_test_00289018.jpg +Places365_test_00289032.jpg +Places365_test_00289055.jpg +Places365_test_00289081.jpg +Places365_test_00289110.jpg +Places365_test_00289119.jpg +Places365_test_00289122.jpg +Places365_test_00289162.jpg +Places365_test_00289166.jpg +Places365_test_00289201.jpg +Places365_test_00289205.jpg +Places365_test_00289210.jpg +Places365_test_00289212.jpg +Places365_test_00289245.jpg +Places365_test_00289259.jpg +Places365_test_00289271.jpg +Places365_test_00289275.jpg +Places365_test_00289288.jpg +Places365_test_00289298.jpg +Places365_test_00289321.jpg +Places365_test_00289344.jpg +Places365_test_00289345.jpg +Places365_test_00289350.jpg +Places365_test_00289384.jpg +Places365_test_00289401.jpg +Places365_test_00289425.jpg +Places365_test_00289427.jpg +Places365_test_00289435.jpg +Places365_test_00289442.jpg +Places365_test_00289451.jpg +Places365_test_00289489.jpg +Places365_test_00289503.jpg +Places365_test_00289511.jpg +Places365_test_00289538.jpg +Places365_test_00289539.jpg +Places365_test_00289555.jpg +Places365_test_00289566.jpg +Places365_test_00289578.jpg +Places365_test_00289597.jpg +Places365_test_00289607.jpg +Places365_test_00289610.jpg +Places365_test_00289615.jpg +Places365_test_00289620.jpg +Places365_test_00289633.jpg +Places365_test_00289640.jpg +Places365_test_00289641.jpg +Places365_test_00289658.jpg +Places365_test_00289677.jpg +Places365_test_00289685.jpg +Places365_test_00289689.jpg +Places365_test_00289699.jpg +Places365_test_00289701.jpg +Places365_test_00289704.jpg +Places365_test_00289714.jpg +Places365_test_00289718.jpg +Places365_test_00289734.jpg +Places365_test_00289737.jpg +Places365_test_00289739.jpg +Places365_test_00289761.jpg +Places365_test_00289766.jpg +Places365_test_00289782.jpg +Places365_test_00289793.jpg +Places365_test_00289808.jpg +Places365_test_00289814.jpg +Places365_test_00289817.jpg +Places365_test_00289842.jpg +Places365_test_00289857.jpg +Places365_test_00289866.jpg +Places365_test_00289868.jpg +Places365_test_00289884.jpg +Places365_test_00289899.jpg +Places365_test_00289900.jpg +Places365_test_00289920.jpg +Places365_test_00289926.jpg +Places365_test_00289928.jpg +Places365_test_00289937.jpg 
+Places365_test_00289947.jpg +Places365_test_00289966.jpg +Places365_test_00289971.jpg +Places365_test_00289986.jpg +Places365_test_00289989.jpg +Places365_test_00290026.jpg +Places365_test_00290028.jpg +Places365_test_00290054.jpg +Places365_test_00290057.jpg +Places365_test_00290060.jpg +Places365_test_00290073.jpg +Places365_test_00290082.jpg +Places365_test_00290091.jpg +Places365_test_00290101.jpg +Places365_test_00290115.jpg +Places365_test_00290120.jpg +Places365_test_00290129.jpg +Places365_test_00290133.jpg +Places365_test_00290135.jpg +Places365_test_00290149.jpg +Places365_test_00290166.jpg +Places365_test_00290171.jpg +Places365_test_00290173.jpg +Places365_test_00290194.jpg +Places365_test_00290203.jpg +Places365_test_00290215.jpg +Places365_test_00290227.jpg +Places365_test_00290230.jpg +Places365_test_00290232.jpg +Places365_test_00290236.jpg +Places365_test_00290241.jpg +Places365_test_00290253.jpg +Places365_test_00290259.jpg +Places365_test_00290300.jpg +Places365_test_00290318.jpg +Places365_test_00290321.jpg +Places365_test_00290336.jpg +Places365_test_00290347.jpg +Places365_test_00290348.jpg +Places365_test_00290349.jpg +Places365_test_00290386.jpg +Places365_test_00290388.jpg +Places365_test_00290389.jpg +Places365_test_00290393.jpg +Places365_test_00290394.jpg +Places365_test_00290396.jpg +Places365_test_00290416.jpg +Places365_test_00290427.jpg +Places365_test_00290441.jpg +Places365_test_00290449.jpg +Places365_test_00290450.jpg +Places365_test_00290458.jpg +Places365_test_00290464.jpg +Places365_test_00290483.jpg +Places365_test_00290506.jpg +Places365_test_00290507.jpg +Places365_test_00290519.jpg +Places365_test_00290522.jpg +Places365_test_00290527.jpg +Places365_test_00290535.jpg +Places365_test_00290540.jpg +Places365_test_00290556.jpg +Places365_test_00290564.jpg +Places365_test_00290579.jpg +Places365_test_00290585.jpg +Places365_test_00290587.jpg +Places365_test_00290590.jpg +Places365_test_00290591.jpg +Places365_test_00290604.jpg +Places365_test_00290605.jpg +Places365_test_00290608.jpg +Places365_test_00290620.jpg +Places365_test_00290639.jpg +Places365_test_00290651.jpg +Places365_test_00290652.jpg +Places365_test_00290659.jpg +Places365_test_00290672.jpg +Places365_test_00290674.jpg +Places365_test_00290679.jpg +Places365_test_00290688.jpg +Places365_test_00290690.jpg +Places365_test_00290696.jpg +Places365_test_00290716.jpg +Places365_test_00290759.jpg +Places365_test_00290772.jpg +Places365_test_00290778.jpg +Places365_test_00290801.jpg +Places365_test_00290805.jpg +Places365_test_00290819.jpg +Places365_test_00290830.jpg +Places365_test_00290836.jpg +Places365_test_00290839.jpg +Places365_test_00290858.jpg +Places365_test_00290874.jpg +Places365_test_00290884.jpg +Places365_test_00290919.jpg +Places365_test_00290924.jpg +Places365_test_00290937.jpg +Places365_test_00290951.jpg +Places365_test_00290953.jpg +Places365_test_00290963.jpg +Places365_test_00291002.jpg +Places365_test_00291004.jpg +Places365_test_00291020.jpg +Places365_test_00291021.jpg +Places365_test_00291035.jpg +Places365_test_00291042.jpg +Places365_test_00291050.jpg +Places365_test_00291054.jpg +Places365_test_00291067.jpg +Places365_test_00291078.jpg +Places365_test_00291095.jpg +Places365_test_00291111.jpg +Places365_test_00291126.jpg +Places365_test_00291139.jpg +Places365_test_00291141.jpg +Places365_test_00291163.jpg +Places365_test_00291167.jpg +Places365_test_00291175.jpg +Places365_test_00291181.jpg +Places365_test_00291215.jpg +Places365_test_00291257.jpg 
+Places365_test_00291260.jpg +Places365_test_00291263.jpg +Places365_test_00291265.jpg +Places365_test_00291278.jpg +Places365_test_00291288.jpg +Places365_test_00291292.jpg +Places365_test_00291300.jpg +Places365_test_00291308.jpg +Places365_test_00291314.jpg +Places365_test_00291319.jpg +Places365_test_00291331.jpg +Places365_test_00291341.jpg +Places365_test_00291367.jpg +Places365_test_00291380.jpg +Places365_test_00291399.jpg +Places365_test_00291403.jpg +Places365_test_00291423.jpg +Places365_test_00291429.jpg +Places365_test_00291440.jpg +Places365_test_00291455.jpg +Places365_test_00291458.jpg +Places365_test_00291469.jpg +Places365_test_00291488.jpg +Places365_test_00291490.jpg +Places365_test_00291512.jpg +Places365_test_00291517.jpg +Places365_test_00291530.jpg +Places365_test_00291532.jpg +Places365_test_00291534.jpg +Places365_test_00291538.jpg +Places365_test_00291550.jpg +Places365_test_00291556.jpg +Places365_test_00291557.jpg +Places365_test_00291559.jpg +Places365_test_00291560.jpg +Places365_test_00291568.jpg +Places365_test_00291574.jpg +Places365_test_00291592.jpg +Places365_test_00291594.jpg +Places365_test_00291616.jpg +Places365_test_00291620.jpg +Places365_test_00291656.jpg +Places365_test_00291680.jpg +Places365_test_00291703.jpg +Places365_test_00291713.jpg +Places365_test_00291718.jpg +Places365_test_00291723.jpg +Places365_test_00291759.jpg +Places365_test_00291761.jpg +Places365_test_00291777.jpg +Places365_test_00291793.jpg +Places365_test_00291794.jpg +Places365_test_00291803.jpg +Places365_test_00291806.jpg +Places365_test_00291828.jpg +Places365_test_00291831.jpg +Places365_test_00291832.jpg +Places365_test_00291844.jpg +Places365_test_00291850.jpg +Places365_test_00291854.jpg +Places365_test_00291877.jpg +Places365_test_00291882.jpg +Places365_test_00291894.jpg +Places365_test_00291920.jpg +Places365_test_00291921.jpg +Places365_test_00291932.jpg +Places365_test_00291948.jpg +Places365_test_00291961.jpg +Places365_test_00291996.jpg +Places365_test_00291998.jpg +Places365_test_00292013.jpg +Places365_test_00292015.jpg +Places365_test_00292021.jpg +Places365_test_00292024.jpg +Places365_test_00292035.jpg +Places365_test_00292036.jpg +Places365_test_00292052.jpg +Places365_test_00292064.jpg +Places365_test_00292066.jpg +Places365_test_00292077.jpg +Places365_test_00292078.jpg +Places365_test_00292095.jpg +Places365_test_00292104.jpg +Places365_test_00292132.jpg +Places365_test_00292135.jpg +Places365_test_00292144.jpg +Places365_test_00292146.jpg +Places365_test_00292151.jpg +Places365_test_00292173.jpg +Places365_test_00292187.jpg +Places365_test_00292227.jpg +Places365_test_00292238.jpg +Places365_test_00292245.jpg +Places365_test_00292246.jpg +Places365_test_00292258.jpg +Places365_test_00292261.jpg +Places365_test_00292275.jpg +Places365_test_00292288.jpg +Places365_test_00292293.jpg +Places365_test_00292297.jpg +Places365_test_00292298.jpg +Places365_test_00292299.jpg +Places365_test_00292311.jpg +Places365_test_00292325.jpg +Places365_test_00292337.jpg +Places365_test_00292339.jpg +Places365_test_00292340.jpg +Places365_test_00292351.jpg +Places365_test_00292361.jpg +Places365_test_00292369.jpg +Places365_test_00292382.jpg +Places365_test_00292400.jpg +Places365_test_00292418.jpg +Places365_test_00292425.jpg +Places365_test_00292429.jpg +Places365_test_00292446.jpg +Places365_test_00292453.jpg +Places365_test_00292469.jpg +Places365_test_00292485.jpg +Places365_test_00292493.jpg +Places365_test_00292557.jpg +Places365_test_00292566.jpg 
+Places365_test_00292574.jpg +Places365_test_00292577.jpg +Places365_test_00292582.jpg +Places365_test_00292592.jpg +Places365_test_00292622.jpg +Places365_test_00292641.jpg +Places365_test_00292655.jpg +Places365_test_00292660.jpg +Places365_test_00292712.jpg +Places365_test_00292716.jpg +Places365_test_00292717.jpg +Places365_test_00292720.jpg +Places365_test_00292731.jpg +Places365_test_00292743.jpg +Places365_test_00292748.jpg +Places365_test_00292772.jpg +Places365_test_00292800.jpg +Places365_test_00292809.jpg +Places365_test_00292812.jpg +Places365_test_00292813.jpg +Places365_test_00292843.jpg +Places365_test_00292853.jpg +Places365_test_00292891.jpg +Places365_test_00292895.jpg +Places365_test_00292899.jpg +Places365_test_00292901.jpg +Places365_test_00292912.jpg +Places365_test_00292930.jpg +Places365_test_00292939.jpg +Places365_test_00292942.jpg +Places365_test_00292944.jpg +Places365_test_00292967.jpg +Places365_test_00292975.jpg +Places365_test_00292985.jpg +Places365_test_00292992.jpg +Places365_test_00292999.jpg +Places365_test_00293009.jpg +Places365_test_00293023.jpg +Places365_test_00293033.jpg +Places365_test_00293038.jpg +Places365_test_00293039.jpg +Places365_test_00293041.jpg +Places365_test_00293056.jpg +Places365_test_00293082.jpg +Places365_test_00293138.jpg +Places365_test_00293141.jpg +Places365_test_00293142.jpg +Places365_test_00293166.jpg +Places365_test_00293173.jpg +Places365_test_00293182.jpg +Places365_test_00293192.jpg +Places365_test_00293198.jpg +Places365_test_00293200.jpg +Places365_test_00293215.jpg +Places365_test_00293247.jpg +Places365_test_00293251.jpg +Places365_test_00293258.jpg +Places365_test_00293274.jpg +Places365_test_00293275.jpg +Places365_test_00293290.jpg +Places365_test_00293308.jpg +Places365_test_00293313.jpg +Places365_test_00293317.jpg +Places365_test_00293325.jpg +Places365_test_00293327.jpg +Places365_test_00293343.jpg +Places365_test_00293352.jpg +Places365_test_00293386.jpg +Places365_test_00293392.jpg +Places365_test_00293404.jpg +Places365_test_00293405.jpg +Places365_test_00293414.jpg +Places365_test_00293419.jpg +Places365_test_00293423.jpg +Places365_test_00293435.jpg +Places365_test_00293436.jpg +Places365_test_00293438.jpg +Places365_test_00293465.jpg +Places365_test_00293480.jpg +Places365_test_00293485.jpg +Places365_test_00293488.jpg +Places365_test_00293498.jpg +Places365_test_00293506.jpg +Places365_test_00293510.jpg +Places365_test_00293526.jpg +Places365_test_00293527.jpg +Places365_test_00293532.jpg +Places365_test_00293534.jpg +Places365_test_00293553.jpg +Places365_test_00293562.jpg +Places365_test_00293575.jpg +Places365_test_00293580.jpg +Places365_test_00293584.jpg +Places365_test_00293596.jpg +Places365_test_00293604.jpg +Places365_test_00293608.jpg +Places365_test_00293614.jpg +Places365_test_00293616.jpg +Places365_test_00293621.jpg +Places365_test_00293624.jpg +Places365_test_00293627.jpg +Places365_test_00293640.jpg +Places365_test_00293645.jpg +Places365_test_00293650.jpg +Places365_test_00293655.jpg +Places365_test_00293678.jpg +Places365_test_00293693.jpg +Places365_test_00293706.jpg +Places365_test_00293707.jpg +Places365_test_00293718.jpg +Places365_test_00293719.jpg +Places365_test_00293722.jpg +Places365_test_00293730.jpg +Places365_test_00293731.jpg +Places365_test_00293745.jpg +Places365_test_00293749.jpg +Places365_test_00293759.jpg +Places365_test_00293763.jpg +Places365_test_00293769.jpg +Places365_test_00293789.jpg +Places365_test_00293802.jpg +Places365_test_00293829.jpg 
+Places365_test_00293830.jpg +Places365_test_00293840.jpg +Places365_test_00293841.jpg +Places365_test_00293896.jpg +Places365_test_00293902.jpg +Places365_test_00293932.jpg +Places365_test_00293935.jpg +Places365_test_00293942.jpg +Places365_test_00293949.jpg +Places365_test_00293951.jpg +Places365_test_00293960.jpg +Places365_test_00293965.jpg +Places365_test_00293967.jpg +Places365_test_00294006.jpg +Places365_test_00294036.jpg +Places365_test_00294078.jpg +Places365_test_00294106.jpg +Places365_test_00294120.jpg +Places365_test_00294123.jpg +Places365_test_00294132.jpg +Places365_test_00294140.jpg +Places365_test_00294141.jpg +Places365_test_00294151.jpg +Places365_test_00294154.jpg +Places365_test_00294157.jpg +Places365_test_00294159.jpg +Places365_test_00294161.jpg +Places365_test_00294178.jpg +Places365_test_00294193.jpg +Places365_test_00294199.jpg +Places365_test_00294207.jpg +Places365_test_00294208.jpg +Places365_test_00294211.jpg +Places365_test_00294218.jpg +Places365_test_00294223.jpg +Places365_test_00294224.jpg +Places365_test_00294234.jpg +Places365_test_00294247.jpg +Places365_test_00294252.jpg +Places365_test_00294264.jpg +Places365_test_00294267.jpg +Places365_test_00294291.jpg +Places365_test_00294312.jpg +Places365_test_00294320.jpg +Places365_test_00294338.jpg +Places365_test_00294349.jpg +Places365_test_00294350.jpg +Places365_test_00294351.jpg +Places365_test_00294360.jpg +Places365_test_00294421.jpg +Places365_test_00294432.jpg +Places365_test_00294439.jpg +Places365_test_00294446.jpg +Places365_test_00294460.jpg +Places365_test_00294476.jpg +Places365_test_00294501.jpg +Places365_test_00294539.jpg +Places365_test_00294546.jpg +Places365_test_00294553.jpg +Places365_test_00294560.jpg +Places365_test_00294582.jpg +Places365_test_00294592.jpg +Places365_test_00294593.jpg +Places365_test_00294623.jpg +Places365_test_00294625.jpg +Places365_test_00294640.jpg +Places365_test_00294643.jpg +Places365_test_00294651.jpg +Places365_test_00294675.jpg +Places365_test_00294686.jpg +Places365_test_00294701.jpg +Places365_test_00294714.jpg +Places365_test_00294715.jpg +Places365_test_00294720.jpg +Places365_test_00294737.jpg +Places365_test_00294783.jpg +Places365_test_00294830.jpg +Places365_test_00294831.jpg +Places365_test_00294832.jpg +Places365_test_00294834.jpg +Places365_test_00294843.jpg +Places365_test_00294851.jpg +Places365_test_00294867.jpg +Places365_test_00294877.jpg +Places365_test_00294898.jpg +Places365_test_00294905.jpg +Places365_test_00294920.jpg +Places365_test_00294942.jpg +Places365_test_00294968.jpg +Places365_test_00294974.jpg +Places365_test_00294976.jpg +Places365_test_00294986.jpg +Places365_test_00294999.jpg +Places365_test_00295028.jpg +Places365_test_00295049.jpg +Places365_test_00295052.jpg +Places365_test_00295056.jpg +Places365_test_00295068.jpg +Places365_test_00295080.jpg +Places365_test_00295101.jpg +Places365_test_00295108.jpg +Places365_test_00295128.jpg +Places365_test_00295147.jpg +Places365_test_00295157.jpg +Places365_test_00295172.jpg +Places365_test_00295185.jpg +Places365_test_00295195.jpg +Places365_test_00295204.jpg +Places365_test_00295205.jpg +Places365_test_00295206.jpg +Places365_test_00295211.jpg +Places365_test_00295228.jpg +Places365_test_00295237.jpg +Places365_test_00295245.jpg +Places365_test_00295247.jpg +Places365_test_00295250.jpg +Places365_test_00295262.jpg +Places365_test_00295301.jpg +Places365_test_00295307.jpg +Places365_test_00295335.jpg +Places365_test_00295336.jpg +Places365_test_00295353.jpg 
+Places365_test_00295359.jpg +Places365_test_00295372.jpg +Places365_test_00295393.jpg +Places365_test_00295399.jpg +Places365_test_00295402.jpg +Places365_test_00295425.jpg +Places365_test_00295442.jpg +Places365_test_00295463.jpg +Places365_test_00295481.jpg +Places365_test_00295490.jpg +Places365_test_00295495.jpg +Places365_test_00295497.jpg +Places365_test_00295499.jpg +Places365_test_00295503.jpg +Places365_test_00295507.jpg +Places365_test_00295514.jpg +Places365_test_00295521.jpg +Places365_test_00295522.jpg +Places365_test_00295527.jpg +Places365_test_00295531.jpg +Places365_test_00295536.jpg +Places365_test_00295565.jpg +Places365_test_00295570.jpg +Places365_test_00295573.jpg +Places365_test_00295583.jpg +Places365_test_00295598.jpg +Places365_test_00295615.jpg +Places365_test_00295618.jpg +Places365_test_00295622.jpg +Places365_test_00295627.jpg +Places365_test_00295630.jpg +Places365_test_00295639.jpg +Places365_test_00295648.jpg +Places365_test_00295658.jpg +Places365_test_00295687.jpg +Places365_test_00295692.jpg +Places365_test_00295696.jpg +Places365_test_00295717.jpg +Places365_test_00295722.jpg +Places365_test_00295729.jpg +Places365_test_00295749.jpg +Places365_test_00295758.jpg +Places365_test_00295761.jpg +Places365_test_00295767.jpg +Places365_test_00295771.jpg +Places365_test_00295781.jpg +Places365_test_00295803.jpg +Places365_test_00295841.jpg +Places365_test_00295865.jpg +Places365_test_00295882.jpg +Places365_test_00295887.jpg +Places365_test_00295906.jpg +Places365_test_00295924.jpg +Places365_test_00295936.jpg +Places365_test_00295940.jpg +Places365_test_00295944.jpg +Places365_test_00295947.jpg +Places365_test_00295986.jpg +Places365_test_00295995.jpg +Places365_test_00295997.jpg +Places365_test_00296007.jpg +Places365_test_00296011.jpg +Places365_test_00296019.jpg +Places365_test_00296021.jpg +Places365_test_00296034.jpg +Places365_test_00296039.jpg +Places365_test_00296049.jpg +Places365_test_00296066.jpg +Places365_test_00296068.jpg +Places365_test_00296076.jpg +Places365_test_00296081.jpg +Places365_test_00296107.jpg +Places365_test_00296108.jpg +Places365_test_00296119.jpg +Places365_test_00296126.jpg +Places365_test_00296131.jpg +Places365_test_00296137.jpg +Places365_test_00296152.jpg +Places365_test_00296198.jpg +Places365_test_00296207.jpg +Places365_test_00296234.jpg +Places365_test_00296250.jpg +Places365_test_00296257.jpg +Places365_test_00296276.jpg +Places365_test_00296302.jpg +Places365_test_00296309.jpg +Places365_test_00296342.jpg +Places365_test_00296344.jpg +Places365_test_00296361.jpg +Places365_test_00296363.jpg +Places365_test_00296367.jpg +Places365_test_00296422.jpg +Places365_test_00296425.jpg +Places365_test_00296448.jpg +Places365_test_00296449.jpg +Places365_test_00296472.jpg +Places365_test_00296473.jpg +Places365_test_00296478.jpg +Places365_test_00296507.jpg +Places365_test_00296536.jpg +Places365_test_00296569.jpg +Places365_test_00296592.jpg +Places365_test_00296616.jpg +Places365_test_00296632.jpg +Places365_test_00296657.jpg +Places365_test_00296667.jpg +Places365_test_00296677.jpg +Places365_test_00296688.jpg +Places365_test_00296699.jpg +Places365_test_00296718.jpg +Places365_test_00296732.jpg +Places365_test_00296735.jpg +Places365_test_00296743.jpg +Places365_test_00296800.jpg +Places365_test_00296815.jpg +Places365_test_00296826.jpg +Places365_test_00296828.jpg +Places365_test_00296833.jpg +Places365_test_00296846.jpg +Places365_test_00296848.jpg +Places365_test_00296850.jpg +Places365_test_00296856.jpg 
+Places365_test_00296867.jpg +Places365_test_00296878.jpg +Places365_test_00296892.jpg +Places365_test_00296906.jpg +Places365_test_00296928.jpg +Places365_test_00296938.jpg +Places365_test_00296944.jpg +Places365_test_00296957.jpg +Places365_test_00296965.jpg +Places365_test_00296970.jpg +Places365_test_00296971.jpg +Places365_test_00296984.jpg +Places365_test_00296987.jpg +Places365_test_00296992.jpg +Places365_test_00297000.jpg +Places365_test_00297003.jpg +Places365_test_00297005.jpg +Places365_test_00297011.jpg +Places365_test_00297012.jpg +Places365_test_00297056.jpg +Places365_test_00297058.jpg +Places365_test_00297064.jpg +Places365_test_00297065.jpg +Places365_test_00297070.jpg +Places365_test_00297075.jpg +Places365_test_00297078.jpg +Places365_test_00297082.jpg +Places365_test_00297094.jpg +Places365_test_00297102.jpg +Places365_test_00297106.jpg +Places365_test_00297112.jpg +Places365_test_00297121.jpg +Places365_test_00297141.jpg +Places365_test_00297155.jpg +Places365_test_00297161.jpg +Places365_test_00297163.jpg +Places365_test_00297174.jpg +Places365_test_00297181.jpg +Places365_test_00297231.jpg +Places365_test_00297238.jpg +Places365_test_00297240.jpg +Places365_test_00297262.jpg +Places365_test_00297265.jpg +Places365_test_00297277.jpg +Places365_test_00297280.jpg +Places365_test_00297293.jpg +Places365_test_00297299.jpg +Places365_test_00297314.jpg +Places365_test_00297321.jpg +Places365_test_00297371.jpg +Places365_test_00297377.jpg +Places365_test_00297392.jpg +Places365_test_00297401.jpg +Places365_test_00297402.jpg +Places365_test_00297403.jpg +Places365_test_00297410.jpg +Places365_test_00297423.jpg +Places365_test_00297450.jpg +Places365_test_00297455.jpg +Places365_test_00297486.jpg +Places365_test_00297490.jpg +Places365_test_00297503.jpg +Places365_test_00297506.jpg +Places365_test_00297507.jpg +Places365_test_00297509.jpg +Places365_test_00297530.jpg +Places365_test_00297531.jpg +Places365_test_00297547.jpg +Places365_test_00297552.jpg +Places365_test_00297554.jpg +Places365_test_00297555.jpg +Places365_test_00297626.jpg +Places365_test_00297654.jpg +Places365_test_00297664.jpg +Places365_test_00297667.jpg +Places365_test_00297685.jpg +Places365_test_00297694.jpg +Places365_test_00297697.jpg +Places365_test_00297713.jpg +Places365_test_00297716.jpg +Places365_test_00297726.jpg +Places365_test_00297738.jpg +Places365_test_00297739.jpg +Places365_test_00297740.jpg +Places365_test_00297768.jpg +Places365_test_00297784.jpg +Places365_test_00297798.jpg +Places365_test_00297800.jpg +Places365_test_00297803.jpg +Places365_test_00297835.jpg +Places365_test_00297852.jpg +Places365_test_00297862.jpg +Places365_test_00297869.jpg +Places365_test_00297870.jpg +Places365_test_00297880.jpg +Places365_test_00297899.jpg +Places365_test_00297909.jpg +Places365_test_00297917.jpg +Places365_test_00297919.jpg +Places365_test_00297923.jpg +Places365_test_00297936.jpg +Places365_test_00297941.jpg +Places365_test_00297942.jpg +Places365_test_00297953.jpg +Places365_test_00297993.jpg +Places365_test_00297995.jpg +Places365_test_00297997.jpg +Places365_test_00297998.jpg +Places365_test_00298000.jpg +Places365_test_00298025.jpg +Places365_test_00298038.jpg +Places365_test_00298055.jpg +Places365_test_00298071.jpg +Places365_test_00298074.jpg +Places365_test_00298078.jpg +Places365_test_00298090.jpg +Places365_test_00298094.jpg +Places365_test_00298128.jpg +Places365_test_00298130.jpg +Places365_test_00298145.jpg +Places365_test_00298156.jpg +Places365_test_00298159.jpg 
+Places365_test_00298163.jpg +Places365_test_00298177.jpg +Places365_test_00298194.jpg +Places365_test_00298195.jpg +Places365_test_00298200.jpg +Places365_test_00298221.jpg +Places365_test_00298222.jpg +Places365_test_00298242.jpg +Places365_test_00298252.jpg +Places365_test_00298278.jpg +Places365_test_00298288.jpg +Places365_test_00298292.jpg +Places365_test_00298301.jpg +Places365_test_00298302.jpg +Places365_test_00298313.jpg +Places365_test_00298314.jpg +Places365_test_00298325.jpg +Places365_test_00298331.jpg +Places365_test_00298339.jpg +Places365_test_00298358.jpg +Places365_test_00298366.jpg +Places365_test_00298384.jpg +Places365_test_00298388.jpg +Places365_test_00298391.jpg +Places365_test_00298392.jpg +Places365_test_00298395.jpg +Places365_test_00298411.jpg +Places365_test_00298444.jpg +Places365_test_00298462.jpg +Places365_test_00298474.jpg +Places365_test_00298478.jpg +Places365_test_00298484.jpg +Places365_test_00298486.jpg +Places365_test_00298501.jpg +Places365_test_00298504.jpg +Places365_test_00298506.jpg +Places365_test_00298517.jpg +Places365_test_00298539.jpg +Places365_test_00298548.jpg +Places365_test_00298564.jpg +Places365_test_00298572.jpg +Places365_test_00298573.jpg +Places365_test_00298579.jpg +Places365_test_00298621.jpg +Places365_test_00298661.jpg +Places365_test_00298669.jpg +Places365_test_00298671.jpg +Places365_test_00298685.jpg +Places365_test_00298693.jpg +Places365_test_00298728.jpg +Places365_test_00298746.jpg +Places365_test_00298750.jpg +Places365_test_00298752.jpg +Places365_test_00298753.jpg +Places365_test_00298754.jpg +Places365_test_00298759.jpg +Places365_test_00298773.jpg +Places365_test_00298777.jpg +Places365_test_00298779.jpg +Places365_test_00298782.jpg +Places365_test_00298784.jpg +Places365_test_00298795.jpg +Places365_test_00298799.jpg +Places365_test_00298807.jpg +Places365_test_00298813.jpg +Places365_test_00298814.jpg +Places365_test_00298815.jpg +Places365_test_00298821.jpg +Places365_test_00298830.jpg +Places365_test_00298845.jpg +Places365_test_00298869.jpg +Places365_test_00298879.jpg +Places365_test_00298894.jpg +Places365_test_00298969.jpg +Places365_test_00298981.jpg +Places365_test_00298992.jpg +Places365_test_00298993.jpg +Places365_test_00298994.jpg +Places365_test_00298999.jpg +Places365_test_00299027.jpg +Places365_test_00299028.jpg +Places365_test_00299057.jpg +Places365_test_00299060.jpg +Places365_test_00299064.jpg +Places365_test_00299067.jpg +Places365_test_00299091.jpg +Places365_test_00299092.jpg +Places365_test_00299106.jpg +Places365_test_00299118.jpg +Places365_test_00299121.jpg +Places365_test_00299133.jpg +Places365_test_00299136.jpg +Places365_test_00299142.jpg +Places365_test_00299149.jpg +Places365_test_00299158.jpg +Places365_test_00299160.jpg +Places365_test_00299173.jpg +Places365_test_00299182.jpg +Places365_test_00299186.jpg +Places365_test_00299207.jpg +Places365_test_00299219.jpg +Places365_test_00299220.jpg +Places365_test_00299221.jpg +Places365_test_00299224.jpg +Places365_test_00299237.jpg +Places365_test_00299238.jpg +Places365_test_00299289.jpg +Places365_test_00299304.jpg +Places365_test_00299320.jpg +Places365_test_00299321.jpg +Places365_test_00299325.jpg +Places365_test_00299333.jpg +Places365_test_00299338.jpg +Places365_test_00299350.jpg +Places365_test_00299399.jpg +Places365_test_00299403.jpg +Places365_test_00299407.jpg +Places365_test_00299440.jpg +Places365_test_00299459.jpg +Places365_test_00299472.jpg +Places365_test_00299491.jpg +Places365_test_00299493.jpg 
+Places365_test_00299507.jpg +Places365_test_00299523.jpg +Places365_test_00299533.jpg +Places365_test_00299535.jpg +Places365_test_00299540.jpg +Places365_test_00299562.jpg +Places365_test_00299570.jpg +Places365_test_00299581.jpg +Places365_test_00299613.jpg +Places365_test_00299626.jpg +Places365_test_00299635.jpg +Places365_test_00299648.jpg +Places365_test_00299649.jpg +Places365_test_00299651.jpg +Places365_test_00299653.jpg +Places365_test_00299654.jpg +Places365_test_00299656.jpg +Places365_test_00299666.jpg +Places365_test_00299677.jpg +Places365_test_00299681.jpg +Places365_test_00299686.jpg +Places365_test_00299692.jpg +Places365_test_00299696.jpg +Places365_test_00299698.jpg +Places365_test_00299716.jpg +Places365_test_00299722.jpg +Places365_test_00299725.jpg +Places365_test_00299737.jpg +Places365_test_00299753.jpg +Places365_test_00299764.jpg +Places365_test_00299766.jpg +Places365_test_00299767.jpg +Places365_test_00299794.jpg +Places365_test_00299828.jpg +Places365_test_00299838.jpg +Places365_test_00299894.jpg +Places365_test_00299897.jpg +Places365_test_00299908.jpg +Places365_test_00299910.jpg +Places365_test_00299914.jpg +Places365_test_00299917.jpg +Places365_test_00299936.jpg +Places365_test_00299949.jpg +Places365_test_00299962.jpg +Places365_test_00299963.jpg +Places365_test_00299980.jpg +Places365_test_00299984.jpg +Places365_test_00299986.jpg +Places365_test_00299989.jpg +Places365_test_00299995.jpg +Places365_test_00300001.jpg +Places365_test_00300005.jpg +Places365_test_00300049.jpg +Places365_test_00300062.jpg +Places365_test_00300071.jpg +Places365_test_00300091.jpg +Places365_test_00300100.jpg +Places365_test_00300136.jpg +Places365_test_00300164.jpg +Places365_test_00300191.jpg +Places365_test_00300194.jpg +Places365_test_00300213.jpg +Places365_test_00300227.jpg +Places365_test_00300236.jpg +Places365_test_00300245.jpg +Places365_test_00300269.jpg +Places365_test_00300276.jpg +Places365_test_00300305.jpg +Places365_test_00300307.jpg +Places365_test_00300311.jpg +Places365_test_00300316.jpg +Places365_test_00300337.jpg +Places365_test_00300359.jpg +Places365_test_00300360.jpg +Places365_test_00300366.jpg +Places365_test_00300381.jpg +Places365_test_00300391.jpg +Places365_test_00300411.jpg +Places365_test_00300416.jpg +Places365_test_00300420.jpg +Places365_test_00300422.jpg +Places365_test_00300433.jpg +Places365_test_00300457.jpg +Places365_test_00300461.jpg +Places365_test_00300481.jpg +Places365_test_00300493.jpg +Places365_test_00300507.jpg +Places365_test_00300508.jpg +Places365_test_00300509.jpg +Places365_test_00300540.jpg +Places365_test_00300547.jpg +Places365_test_00300552.jpg +Places365_test_00300567.jpg +Places365_test_00300583.jpg +Places365_test_00300617.jpg +Places365_test_00300630.jpg +Places365_test_00300678.jpg +Places365_test_00300683.jpg +Places365_test_00300695.jpg +Places365_test_00300707.jpg +Places365_test_00300713.jpg +Places365_test_00300732.jpg +Places365_test_00300753.jpg +Places365_test_00300754.jpg +Places365_test_00300755.jpg +Places365_test_00300763.jpg +Places365_test_00300764.jpg +Places365_test_00300769.jpg +Places365_test_00300772.jpg +Places365_test_00300782.jpg +Places365_test_00300799.jpg +Places365_test_00300805.jpg +Places365_test_00300817.jpg +Places365_test_00300818.jpg +Places365_test_00300821.jpg +Places365_test_00300822.jpg +Places365_test_00300823.jpg +Places365_test_00300844.jpg +Places365_test_00300905.jpg +Places365_test_00300912.jpg +Places365_test_00300928.jpg +Places365_test_00300930.jpg 
+Places365_test_00301016.jpg +Places365_test_00301044.jpg +Places365_test_00301053.jpg +Places365_test_00301054.jpg +Places365_test_00301060.jpg +Places365_test_00301063.jpg +Places365_test_00301075.jpg +Places365_test_00301084.jpg +Places365_test_00301099.jpg +Places365_test_00301102.jpg +Places365_test_00301110.jpg +Places365_test_00301132.jpg +Places365_test_00301136.jpg +Places365_test_00301150.jpg +Places365_test_00301156.jpg +Places365_test_00301166.jpg +Places365_test_00301167.jpg +Places365_test_00301173.jpg +Places365_test_00301177.jpg +Places365_test_00301187.jpg +Places365_test_00301206.jpg +Places365_test_00301216.jpg +Places365_test_00301241.jpg +Places365_test_00301250.jpg +Places365_test_00301252.jpg +Places365_test_00301266.jpg +Places365_test_00301269.jpg +Places365_test_00301282.jpg +Places365_test_00301304.jpg +Places365_test_00301306.jpg +Places365_test_00301313.jpg +Places365_test_00301323.jpg +Places365_test_00301329.jpg +Places365_test_00301337.jpg +Places365_test_00301341.jpg +Places365_test_00301357.jpg +Places365_test_00301374.jpg +Places365_test_00301379.jpg +Places365_test_00301394.jpg +Places365_test_00301398.jpg +Places365_test_00301402.jpg +Places365_test_00301410.jpg +Places365_test_00301416.jpg +Places365_test_00301428.jpg +Places365_test_00301432.jpg +Places365_test_00301439.jpg +Places365_test_00301440.jpg +Places365_test_00301463.jpg +Places365_test_00301473.jpg +Places365_test_00301523.jpg +Places365_test_00301546.jpg +Places365_test_00301547.jpg +Places365_test_00301550.jpg +Places365_test_00301591.jpg +Places365_test_00301592.jpg +Places365_test_00301599.jpg +Places365_test_00301616.jpg +Places365_test_00301630.jpg +Places365_test_00301635.jpg +Places365_test_00301637.jpg +Places365_test_00301647.jpg +Places365_test_00301648.jpg +Places365_test_00301649.jpg +Places365_test_00301678.jpg +Places365_test_00301686.jpg +Places365_test_00301710.jpg +Places365_test_00301711.jpg +Places365_test_00301712.jpg +Places365_test_00301731.jpg +Places365_test_00301733.jpg +Places365_test_00301751.jpg +Places365_test_00301798.jpg +Places365_test_00301801.jpg +Places365_test_00301803.jpg +Places365_test_00301824.jpg +Places365_test_00301830.jpg +Places365_test_00301833.jpg +Places365_test_00301835.jpg +Places365_test_00301836.jpg +Places365_test_00301846.jpg +Places365_test_00301852.jpg +Places365_test_00301858.jpg +Places365_test_00301864.jpg +Places365_test_00301868.jpg +Places365_test_00301874.jpg +Places365_test_00301946.jpg +Places365_test_00301956.jpg +Places365_test_00301958.jpg +Places365_test_00301972.jpg +Places365_test_00301987.jpg +Places365_test_00301995.jpg +Places365_test_00302005.jpg +Places365_test_00302053.jpg +Places365_test_00302054.jpg +Places365_test_00302066.jpg +Places365_test_00302072.jpg +Places365_test_00302082.jpg +Places365_test_00302089.jpg +Places365_test_00302093.jpg +Places365_test_00302094.jpg +Places365_test_00302098.jpg +Places365_test_00302101.jpg +Places365_test_00302110.jpg +Places365_test_00302122.jpg +Places365_test_00302130.jpg +Places365_test_00302167.jpg +Places365_test_00302198.jpg +Places365_test_00302210.jpg +Places365_test_00302211.jpg +Places365_test_00302213.jpg +Places365_test_00302240.jpg +Places365_test_00302256.jpg +Places365_test_00302294.jpg +Places365_test_00302308.jpg +Places365_test_00302311.jpg +Places365_test_00302324.jpg +Places365_test_00302336.jpg +Places365_test_00302347.jpg +Places365_test_00302357.jpg +Places365_test_00302365.jpg +Places365_test_00302404.jpg +Places365_test_00302410.jpg 
+Places365_test_00302416.jpg +Places365_test_00302424.jpg +Places365_test_00302470.jpg +Places365_test_00302474.jpg +Places365_test_00302477.jpg +Places365_test_00302479.jpg +Places365_test_00302480.jpg +Places365_test_00302499.jpg +Places365_test_00302500.jpg +Places365_test_00302506.jpg +Places365_test_00302514.jpg +Places365_test_00302519.jpg +Places365_test_00302534.jpg +Places365_test_00302553.jpg +Places365_test_00302571.jpg +Places365_test_00302577.jpg +Places365_test_00302578.jpg +Places365_test_00302585.jpg +Places365_test_00302609.jpg +Places365_test_00302665.jpg +Places365_test_00302689.jpg +Places365_test_00302741.jpg +Places365_test_00302746.jpg +Places365_test_00302747.jpg +Places365_test_00302750.jpg +Places365_test_00302761.jpg +Places365_test_00302773.jpg +Places365_test_00302780.jpg +Places365_test_00302784.jpg +Places365_test_00302787.jpg +Places365_test_00302799.jpg +Places365_test_00302809.jpg +Places365_test_00302823.jpg +Places365_test_00302844.jpg +Places365_test_00302845.jpg +Places365_test_00302854.jpg +Places365_test_00302866.jpg +Places365_test_00302895.jpg +Places365_test_00302912.jpg +Places365_test_00302922.jpg +Places365_test_00302967.jpg +Places365_test_00302975.jpg +Places365_test_00302993.jpg +Places365_test_00303009.jpg +Places365_test_00303027.jpg +Places365_test_00303035.jpg +Places365_test_00303043.jpg +Places365_test_00303054.jpg +Places365_test_00303058.jpg +Places365_test_00303059.jpg +Places365_test_00303061.jpg +Places365_test_00303070.jpg +Places365_test_00303082.jpg +Places365_test_00303091.jpg +Places365_test_00303095.jpg +Places365_test_00303104.jpg +Places365_test_00303105.jpg +Places365_test_00303119.jpg +Places365_test_00303122.jpg +Places365_test_00303135.jpg +Places365_test_00303167.jpg +Places365_test_00303171.jpg +Places365_test_00303172.jpg +Places365_test_00303180.jpg +Places365_test_00303188.jpg +Places365_test_00303191.jpg +Places365_test_00303195.jpg +Places365_test_00303200.jpg +Places365_test_00303223.jpg +Places365_test_00303224.jpg +Places365_test_00303243.jpg +Places365_test_00303256.jpg +Places365_test_00303260.jpg +Places365_test_00303265.jpg +Places365_test_00303299.jpg +Places365_test_00303313.jpg +Places365_test_00303315.jpg +Places365_test_00303328.jpg +Places365_test_00303342.jpg +Places365_test_00303368.jpg +Places365_test_00303369.jpg +Places365_test_00303375.jpg +Places365_test_00303376.jpg +Places365_test_00303378.jpg +Places365_test_00303380.jpg +Places365_test_00303383.jpg +Places365_test_00303401.jpg +Places365_test_00303409.jpg +Places365_test_00303417.jpg +Places365_test_00303418.jpg +Places365_test_00303420.jpg +Places365_test_00303433.jpg +Places365_test_00303450.jpg +Places365_test_00303453.jpg +Places365_test_00303482.jpg +Places365_test_00303493.jpg +Places365_test_00303501.jpg +Places365_test_00303506.jpg +Places365_test_00303514.jpg +Places365_test_00303516.jpg +Places365_test_00303519.jpg +Places365_test_00303533.jpg +Places365_test_00303550.jpg +Places365_test_00303555.jpg +Places365_test_00303585.jpg +Places365_test_00303590.jpg +Places365_test_00303600.jpg +Places365_test_00303603.jpg +Places365_test_00303614.jpg +Places365_test_00303616.jpg +Places365_test_00303656.jpg +Places365_test_00303657.jpg +Places365_test_00303661.jpg +Places365_test_00303664.jpg +Places365_test_00303695.jpg +Places365_test_00303704.jpg +Places365_test_00303706.jpg +Places365_test_00303710.jpg +Places365_test_00303723.jpg +Places365_test_00303725.jpg +Places365_test_00303731.jpg +Places365_test_00303734.jpg 
+Places365_test_00303735.jpg +Places365_test_00303749.jpg +Places365_test_00303779.jpg +Places365_test_00303813.jpg +Places365_test_00303817.jpg +Places365_test_00303832.jpg +Places365_test_00303847.jpg +Places365_test_00303850.jpg +Places365_test_00303853.jpg +Places365_test_00303857.jpg +Places365_test_00303864.jpg +Places365_test_00303866.jpg +Places365_test_00303869.jpg +Places365_test_00303870.jpg +Places365_test_00303877.jpg +Places365_test_00303884.jpg +Places365_test_00303906.jpg +Places365_test_00303916.jpg +Places365_test_00303938.jpg +Places365_test_00303969.jpg +Places365_test_00303986.jpg +Places365_test_00303987.jpg +Places365_test_00304002.jpg +Places365_test_00304010.jpg +Places365_test_00304012.jpg +Places365_test_00304017.jpg +Places365_test_00304028.jpg +Places365_test_00304038.jpg +Places365_test_00304045.jpg +Places365_test_00304053.jpg +Places365_test_00304056.jpg +Places365_test_00304058.jpg +Places365_test_00304062.jpg +Places365_test_00304064.jpg +Places365_test_00304071.jpg +Places365_test_00304086.jpg +Places365_test_00304089.jpg +Places365_test_00304142.jpg +Places365_test_00304146.jpg +Places365_test_00304157.jpg +Places365_test_00304164.jpg +Places365_test_00304191.jpg +Places365_test_00304216.jpg +Places365_test_00304218.jpg +Places365_test_00304227.jpg +Places365_test_00304245.jpg +Places365_test_00304248.jpg +Places365_test_00304262.jpg +Places365_test_00304273.jpg +Places365_test_00304310.jpg +Places365_test_00304318.jpg +Places365_test_00304319.jpg +Places365_test_00304334.jpg +Places365_test_00304364.jpg +Places365_test_00304384.jpg +Places365_test_00304413.jpg +Places365_test_00304419.jpg +Places365_test_00304434.jpg +Places365_test_00304435.jpg +Places365_test_00304448.jpg +Places365_test_00304472.jpg +Places365_test_00304477.jpg +Places365_test_00304485.jpg +Places365_test_00304502.jpg +Places365_test_00304557.jpg +Places365_test_00304573.jpg +Places365_test_00304589.jpg +Places365_test_00304598.jpg +Places365_test_00304612.jpg +Places365_test_00304624.jpg +Places365_test_00304628.jpg +Places365_test_00304637.jpg +Places365_test_00304644.jpg +Places365_test_00304656.jpg +Places365_test_00304660.jpg +Places365_test_00304662.jpg +Places365_test_00304666.jpg +Places365_test_00304677.jpg +Places365_test_00304710.jpg +Places365_test_00304723.jpg +Places365_test_00304741.jpg +Places365_test_00304742.jpg +Places365_test_00304746.jpg +Places365_test_00304756.jpg +Places365_test_00304772.jpg +Places365_test_00304777.jpg +Places365_test_00304783.jpg +Places365_test_00304794.jpg +Places365_test_00304798.jpg +Places365_test_00304799.jpg +Places365_test_00304802.jpg +Places365_test_00304804.jpg +Places365_test_00304812.jpg +Places365_test_00304818.jpg +Places365_test_00304868.jpg +Places365_test_00304871.jpg +Places365_test_00304882.jpg +Places365_test_00304885.jpg +Places365_test_00304901.jpg +Places365_test_00304904.jpg +Places365_test_00304905.jpg +Places365_test_00304927.jpg +Places365_test_00304934.jpg +Places365_test_00304946.jpg +Places365_test_00304949.jpg +Places365_test_00304955.jpg +Places365_test_00304978.jpg +Places365_test_00304983.jpg +Places365_test_00304984.jpg +Places365_test_00304994.jpg +Places365_test_00304995.jpg +Places365_test_00304997.jpg +Places365_test_00305001.jpg +Places365_test_00305004.jpg +Places365_test_00305020.jpg +Places365_test_00305021.jpg +Places365_test_00305034.jpg +Places365_test_00305045.jpg +Places365_test_00305058.jpg +Places365_test_00305075.jpg +Places365_test_00305085.jpg +Places365_test_00305090.jpg 
+Places365_test_00305094.jpg +Places365_test_00305103.jpg +Places365_test_00305118.jpg +Places365_test_00305119.jpg +Places365_test_00305121.jpg +Places365_test_00305139.jpg +Places365_test_00305150.jpg +Places365_test_00305176.jpg +Places365_test_00305188.jpg +Places365_test_00305198.jpg +Places365_test_00305208.jpg +Places365_test_00305210.jpg +Places365_test_00305217.jpg +Places365_test_00305232.jpg +Places365_test_00305266.jpg +Places365_test_00305271.jpg +Places365_test_00305280.jpg +Places365_test_00305294.jpg +Places365_test_00305323.jpg +Places365_test_00305328.jpg +Places365_test_00305331.jpg +Places365_test_00305342.jpg +Places365_test_00305344.jpg +Places365_test_00305356.jpg +Places365_test_00305362.jpg +Places365_test_00305364.jpg +Places365_test_00305382.jpg +Places365_test_00305402.jpg +Places365_test_00305409.jpg +Places365_test_00305411.jpg +Places365_test_00305429.jpg +Places365_test_00305453.jpg +Places365_test_00305471.jpg +Places365_test_00305472.jpg +Places365_test_00305497.jpg +Places365_test_00305507.jpg +Places365_test_00305516.jpg +Places365_test_00305523.jpg +Places365_test_00305532.jpg +Places365_test_00305541.jpg +Places365_test_00305584.jpg +Places365_test_00305592.jpg +Places365_test_00305594.jpg +Places365_test_00305621.jpg +Places365_test_00305682.jpg +Places365_test_00305684.jpg +Places365_test_00305685.jpg +Places365_test_00305689.jpg +Places365_test_00305697.jpg +Places365_test_00305709.jpg +Places365_test_00305713.jpg +Places365_test_00305730.jpg +Places365_test_00305733.jpg +Places365_test_00305743.jpg +Places365_test_00305782.jpg +Places365_test_00305796.jpg +Places365_test_00305842.jpg +Places365_test_00305843.jpg +Places365_test_00305868.jpg +Places365_test_00305870.jpg +Places365_test_00305895.jpg +Places365_test_00305899.jpg +Places365_test_00305900.jpg +Places365_test_00305923.jpg +Places365_test_00305924.jpg +Places365_test_00305931.jpg +Places365_test_00305933.jpg +Places365_test_00305937.jpg +Places365_test_00305946.jpg +Places365_test_00305951.jpg +Places365_test_00305955.jpg +Places365_test_00305961.jpg +Places365_test_00305983.jpg +Places365_test_00305984.jpg +Places365_test_00305994.jpg +Places365_test_00305996.jpg +Places365_test_00306001.jpg +Places365_test_00306005.jpg +Places365_test_00306008.jpg +Places365_test_00306010.jpg +Places365_test_00306016.jpg +Places365_test_00306026.jpg +Places365_test_00306031.jpg +Places365_test_00306033.jpg +Places365_test_00306040.jpg +Places365_test_00306052.jpg +Places365_test_00306053.jpg +Places365_test_00306057.jpg +Places365_test_00306061.jpg +Places365_test_00306079.jpg +Places365_test_00306112.jpg +Places365_test_00306139.jpg +Places365_test_00306143.jpg +Places365_test_00306147.jpg +Places365_test_00306177.jpg +Places365_test_00306179.jpg +Places365_test_00306196.jpg +Places365_test_00306203.jpg +Places365_test_00306211.jpg +Places365_test_00306216.jpg +Places365_test_00306225.jpg +Places365_test_00306227.jpg +Places365_test_00306233.jpg +Places365_test_00306236.jpg +Places365_test_00306251.jpg +Places365_test_00306266.jpg +Places365_test_00306277.jpg +Places365_test_00306301.jpg +Places365_test_00306327.jpg +Places365_test_00306328.jpg +Places365_test_00306336.jpg +Places365_test_00306343.jpg +Places365_test_00306344.jpg +Places365_test_00306359.jpg +Places365_test_00306369.jpg +Places365_test_00306385.jpg +Places365_test_00306389.jpg +Places365_test_00306418.jpg +Places365_test_00306430.jpg +Places365_test_00306433.jpg +Places365_test_00306449.jpg +Places365_test_00306452.jpg 
+Places365_test_00306462.jpg +Places365_test_00306463.jpg +Places365_test_00306470.jpg +Places365_test_00306473.jpg +Places365_test_00306476.jpg +Places365_test_00306482.jpg +Places365_test_00306501.jpg +Places365_test_00306502.jpg +Places365_test_00306503.jpg +Places365_test_00306512.jpg +Places365_test_00306519.jpg +Places365_test_00306524.jpg +Places365_test_00306530.jpg +Places365_test_00306535.jpg +Places365_test_00306536.jpg +Places365_test_00306538.jpg +Places365_test_00306545.jpg +Places365_test_00306558.jpg +Places365_test_00306561.jpg +Places365_test_00306572.jpg +Places365_test_00306587.jpg +Places365_test_00306643.jpg +Places365_test_00306645.jpg +Places365_test_00306646.jpg +Places365_test_00306648.jpg +Places365_test_00306657.jpg +Places365_test_00306662.jpg +Places365_test_00306675.jpg +Places365_test_00306680.jpg +Places365_test_00306690.jpg +Places365_test_00306728.jpg +Places365_test_00306740.jpg +Places365_test_00306754.jpg +Places365_test_00306757.jpg +Places365_test_00306769.jpg +Places365_test_00306789.jpg +Places365_test_00306791.jpg +Places365_test_00306803.jpg +Places365_test_00306804.jpg +Places365_test_00306815.jpg +Places365_test_00306824.jpg +Places365_test_00306855.jpg +Places365_test_00306869.jpg +Places365_test_00306879.jpg +Places365_test_00306897.jpg +Places365_test_00306902.jpg +Places365_test_00306903.jpg +Places365_test_00306904.jpg +Places365_test_00306908.jpg +Places365_test_00306937.jpg +Places365_test_00306946.jpg +Places365_test_00306948.jpg +Places365_test_00306960.jpg +Places365_test_00306977.jpg +Places365_test_00306984.jpg +Places365_test_00307004.jpg +Places365_test_00307059.jpg +Places365_test_00307065.jpg +Places365_test_00307075.jpg +Places365_test_00307083.jpg +Places365_test_00307095.jpg +Places365_test_00307096.jpg +Places365_test_00307103.jpg +Places365_test_00307111.jpg +Places365_test_00307113.jpg +Places365_test_00307119.jpg +Places365_test_00307121.jpg +Places365_test_00307127.jpg +Places365_test_00307160.jpg +Places365_test_00307164.jpg +Places365_test_00307165.jpg +Places365_test_00307168.jpg +Places365_test_00307188.jpg +Places365_test_00307190.jpg +Places365_test_00307192.jpg +Places365_test_00307194.jpg +Places365_test_00307206.jpg +Places365_test_00307215.jpg +Places365_test_00307231.jpg +Places365_test_00307237.jpg +Places365_test_00307238.jpg +Places365_test_00307243.jpg +Places365_test_00307249.jpg +Places365_test_00307256.jpg +Places365_test_00307310.jpg +Places365_test_00307312.jpg +Places365_test_00307315.jpg +Places365_test_00307324.jpg +Places365_test_00307332.jpg +Places365_test_00307342.jpg +Places365_test_00307343.jpg +Places365_test_00307346.jpg +Places365_test_00307376.jpg +Places365_test_00307377.jpg +Places365_test_00307393.jpg +Places365_test_00307412.jpg +Places365_test_00307431.jpg +Places365_test_00307442.jpg +Places365_test_00307443.jpg +Places365_test_00307475.jpg +Places365_test_00307482.jpg +Places365_test_00307484.jpg +Places365_test_00307502.jpg +Places365_test_00307509.jpg +Places365_test_00307515.jpg +Places365_test_00307524.jpg +Places365_test_00307547.jpg +Places365_test_00307559.jpg +Places365_test_00307561.jpg +Places365_test_00307580.jpg +Places365_test_00307584.jpg +Places365_test_00307586.jpg +Places365_test_00307591.jpg +Places365_test_00307652.jpg +Places365_test_00307656.jpg +Places365_test_00307662.jpg +Places365_test_00307705.jpg +Places365_test_00307707.jpg +Places365_test_00307710.jpg +Places365_test_00307719.jpg +Places365_test_00307722.jpg +Places365_test_00307728.jpg 
+Places365_test_00307733.jpg +Places365_test_00307739.jpg +Places365_test_00307744.jpg +Places365_test_00307773.jpg +Places365_test_00307795.jpg +Places365_test_00307801.jpg +Places365_test_00307809.jpg +Places365_test_00307814.jpg +Places365_test_00307827.jpg +Places365_test_00307832.jpg +Places365_test_00307836.jpg +Places365_test_00307844.jpg +Places365_test_00307853.jpg +Places365_test_00307857.jpg +Places365_test_00307874.jpg +Places365_test_00307900.jpg +Places365_test_00307908.jpg +Places365_test_00307919.jpg +Places365_test_00307923.jpg +Places365_test_00307928.jpg +Places365_test_00307929.jpg +Places365_test_00307942.jpg +Places365_test_00307952.jpg +Places365_test_00307953.jpg +Places365_test_00307961.jpg +Places365_test_00307962.jpg +Places365_test_00307965.jpg +Places365_test_00307967.jpg +Places365_test_00307970.jpg +Places365_test_00307971.jpg +Places365_test_00307980.jpg +Places365_test_00307990.jpg +Places365_test_00307995.jpg +Places365_test_00308001.jpg +Places365_test_00308019.jpg +Places365_test_00308021.jpg +Places365_test_00308022.jpg +Places365_test_00308033.jpg +Places365_test_00308062.jpg +Places365_test_00308065.jpg +Places365_test_00308078.jpg +Places365_test_00308083.jpg +Places365_test_00308098.jpg +Places365_test_00308102.jpg +Places365_test_00308107.jpg +Places365_test_00308113.jpg +Places365_test_00308123.jpg +Places365_test_00308124.jpg +Places365_test_00308137.jpg +Places365_test_00308189.jpg +Places365_test_00308191.jpg +Places365_test_00308212.jpg +Places365_test_00308223.jpg +Places365_test_00308232.jpg +Places365_test_00308246.jpg +Places365_test_00308258.jpg +Places365_test_00308301.jpg +Places365_test_00308302.jpg +Places365_test_00308327.jpg +Places365_test_00308333.jpg +Places365_test_00308337.jpg +Places365_test_00308381.jpg +Places365_test_00308393.jpg +Places365_test_00308400.jpg +Places365_test_00308433.jpg +Places365_test_00308442.jpg +Places365_test_00308450.jpg +Places365_test_00308458.jpg +Places365_test_00308484.jpg +Places365_test_00308498.jpg +Places365_test_00308519.jpg +Places365_test_00308544.jpg +Places365_test_00308556.jpg +Places365_test_00308558.jpg +Places365_test_00308565.jpg +Places365_test_00308567.jpg +Places365_test_00308578.jpg +Places365_test_00308606.jpg +Places365_test_00308614.jpg +Places365_test_00308617.jpg +Places365_test_00308620.jpg +Places365_test_00308629.jpg +Places365_test_00308640.jpg +Places365_test_00308653.jpg +Places365_test_00308657.jpg +Places365_test_00308665.jpg +Places365_test_00308675.jpg +Places365_test_00308691.jpg +Places365_test_00308698.jpg +Places365_test_00308704.jpg +Places365_test_00308711.jpg +Places365_test_00308712.jpg +Places365_test_00308721.jpg +Places365_test_00308724.jpg +Places365_test_00308730.jpg +Places365_test_00308734.jpg +Places365_test_00308755.jpg +Places365_test_00308756.jpg +Places365_test_00308759.jpg +Places365_test_00308765.jpg +Places365_test_00308769.jpg +Places365_test_00308792.jpg +Places365_test_00308816.jpg +Places365_test_00308836.jpg +Places365_test_00308851.jpg +Places365_test_00308854.jpg +Places365_test_00308884.jpg +Places365_test_00308892.jpg +Places365_test_00308896.jpg +Places365_test_00308909.jpg +Places365_test_00308929.jpg +Places365_test_00308939.jpg +Places365_test_00308947.jpg +Places365_test_00308951.jpg +Places365_test_00308958.jpg +Places365_test_00308960.jpg +Places365_test_00309032.jpg +Places365_test_00309045.jpg +Places365_test_00309056.jpg +Places365_test_00309064.jpg +Places365_test_00309083.jpg +Places365_test_00309102.jpg 
+Places365_test_00309111.jpg +Places365_test_00309129.jpg +Places365_test_00309152.jpg +Places365_test_00309154.jpg +Places365_test_00309155.jpg +Places365_test_00309167.jpg +Places365_test_00309169.jpg +Places365_test_00309180.jpg +Places365_test_00309206.jpg +Places365_test_00309207.jpg +Places365_test_00309225.jpg +Places365_test_00309236.jpg +Places365_test_00309242.jpg +Places365_test_00309259.jpg +Places365_test_00309268.jpg +Places365_test_00309285.jpg +Places365_test_00309287.jpg +Places365_test_00309311.jpg +Places365_test_00309314.jpg +Places365_test_00309318.jpg +Places365_test_00309344.jpg +Places365_test_00309355.jpg +Places365_test_00309362.jpg +Places365_test_00309365.jpg +Places365_test_00309371.jpg +Places365_test_00309398.jpg +Places365_test_00309417.jpg +Places365_test_00309456.jpg +Places365_test_00309464.jpg +Places365_test_00309466.jpg +Places365_test_00309474.jpg +Places365_test_00309503.jpg +Places365_test_00309529.jpg +Places365_test_00309538.jpg +Places365_test_00309543.jpg +Places365_test_00309546.jpg +Places365_test_00309550.jpg +Places365_test_00309558.jpg +Places365_test_00309565.jpg +Places365_test_00309572.jpg +Places365_test_00309590.jpg +Places365_test_00309613.jpg +Places365_test_00309616.jpg +Places365_test_00309617.jpg +Places365_test_00309620.jpg +Places365_test_00309623.jpg +Places365_test_00309631.jpg +Places365_test_00309634.jpg +Places365_test_00309647.jpg +Places365_test_00309655.jpg +Places365_test_00309673.jpg +Places365_test_00309676.jpg +Places365_test_00309692.jpg +Places365_test_00309694.jpg +Places365_test_00309702.jpg +Places365_test_00309743.jpg +Places365_test_00309744.jpg +Places365_test_00309761.jpg +Places365_test_00309772.jpg +Places365_test_00309789.jpg +Places365_test_00309817.jpg +Places365_test_00309839.jpg +Places365_test_00309840.jpg +Places365_test_00309875.jpg +Places365_test_00309879.jpg +Places365_test_00309883.jpg +Places365_test_00309889.jpg +Places365_test_00309896.jpg +Places365_test_00309914.jpg +Places365_test_00309917.jpg +Places365_test_00309935.jpg +Places365_test_00309942.jpg +Places365_test_00309945.jpg +Places365_test_00309947.jpg +Places365_test_00309959.jpg +Places365_test_00309972.jpg +Places365_test_00309998.jpg +Places365_test_00309999.jpg +Places365_test_00310014.jpg +Places365_test_00310031.jpg +Places365_test_00310041.jpg +Places365_test_00310052.jpg +Places365_test_00310054.jpg +Places365_test_00310056.jpg +Places365_test_00310061.jpg +Places365_test_00310064.jpg +Places365_test_00310069.jpg +Places365_test_00310070.jpg +Places365_test_00310074.jpg +Places365_test_00310108.jpg +Places365_test_00310112.jpg +Places365_test_00310122.jpg +Places365_test_00310127.jpg +Places365_test_00310137.jpg +Places365_test_00310146.jpg +Places365_test_00310166.jpg +Places365_test_00310212.jpg +Places365_test_00310217.jpg +Places365_test_00310229.jpg +Places365_test_00310234.jpg +Places365_test_00310240.jpg +Places365_test_00310241.jpg +Places365_test_00310250.jpg +Places365_test_00310259.jpg +Places365_test_00310260.jpg +Places365_test_00310264.jpg +Places365_test_00310266.jpg +Places365_test_00310281.jpg +Places365_test_00310282.jpg +Places365_test_00310307.jpg +Places365_test_00310330.jpg +Places365_test_00310366.jpg +Places365_test_00310372.jpg +Places365_test_00310373.jpg +Places365_test_00310399.jpg +Places365_test_00310400.jpg +Places365_test_00310421.jpg +Places365_test_00310481.jpg +Places365_test_00310498.jpg +Places365_test_00310507.jpg +Places365_test_00310513.jpg +Places365_test_00310525.jpg 
+Places365_test_00310555.jpg +Places365_test_00310564.jpg +Places365_test_00310572.jpg +Places365_test_00310614.jpg +Places365_test_00310620.jpg +Places365_test_00310624.jpg +Places365_test_00310631.jpg +Places365_test_00310639.jpg +Places365_test_00310642.jpg +Places365_test_00310648.jpg +Places365_test_00310655.jpg +Places365_test_00310662.jpg +Places365_test_00310681.jpg +Places365_test_00310696.jpg +Places365_test_00310727.jpg +Places365_test_00310731.jpg +Places365_test_00310734.jpg +Places365_test_00310740.jpg +Places365_test_00310747.jpg +Places365_test_00310752.jpg +Places365_test_00310753.jpg +Places365_test_00310764.jpg +Places365_test_00310784.jpg +Places365_test_00310785.jpg +Places365_test_00310830.jpg +Places365_test_00310843.jpg +Places365_test_00310847.jpg +Places365_test_00310867.jpg +Places365_test_00310904.jpg +Places365_test_00310905.jpg +Places365_test_00310933.jpg +Places365_test_00310935.jpg +Places365_test_00310941.jpg +Places365_test_00310946.jpg +Places365_test_00310986.jpg +Places365_test_00310990.jpg +Places365_test_00310998.jpg +Places365_test_00311039.jpg +Places365_test_00311055.jpg +Places365_test_00311056.jpg +Places365_test_00311061.jpg +Places365_test_00311101.jpg +Places365_test_00311108.jpg +Places365_test_00311144.jpg +Places365_test_00311167.jpg +Places365_test_00311171.jpg +Places365_test_00311182.jpg +Places365_test_00311188.jpg +Places365_test_00311212.jpg +Places365_test_00311227.jpg +Places365_test_00311232.jpg +Places365_test_00311243.jpg +Places365_test_00311250.jpg +Places365_test_00311258.jpg +Places365_test_00311298.jpg +Places365_test_00311299.jpg +Places365_test_00311302.jpg +Places365_test_00311343.jpg +Places365_test_00311354.jpg +Places365_test_00311356.jpg +Places365_test_00311375.jpg +Places365_test_00311379.jpg +Places365_test_00311387.jpg +Places365_test_00311392.jpg +Places365_test_00311425.jpg +Places365_test_00311427.jpg +Places365_test_00311431.jpg +Places365_test_00311453.jpg +Places365_test_00311472.jpg +Places365_test_00311495.jpg +Places365_test_00311504.jpg +Places365_test_00311510.jpg +Places365_test_00311545.jpg +Places365_test_00311569.jpg +Places365_test_00311603.jpg +Places365_test_00311626.jpg +Places365_test_00311638.jpg +Places365_test_00311641.jpg +Places365_test_00311650.jpg +Places365_test_00311657.jpg +Places365_test_00311660.jpg +Places365_test_00311664.jpg +Places365_test_00311665.jpg +Places365_test_00311675.jpg +Places365_test_00311689.jpg +Places365_test_00311693.jpg +Places365_test_00311699.jpg +Places365_test_00311700.jpg +Places365_test_00311717.jpg +Places365_test_00311722.jpg +Places365_test_00311727.jpg +Places365_test_00311731.jpg +Places365_test_00311738.jpg +Places365_test_00311749.jpg +Places365_test_00311751.jpg +Places365_test_00311772.jpg +Places365_test_00311786.jpg +Places365_test_00311790.jpg +Places365_test_00311791.jpg +Places365_test_00311792.jpg +Places365_test_00311805.jpg +Places365_test_00311825.jpg +Places365_test_00311840.jpg +Places365_test_00311879.jpg +Places365_test_00311912.jpg +Places365_test_00311914.jpg +Places365_test_00311915.jpg +Places365_test_00311930.jpg +Places365_test_00311951.jpg +Places365_test_00311955.jpg +Places365_test_00311992.jpg +Places365_test_00312007.jpg +Places365_test_00312032.jpg +Places365_test_00312044.jpg +Places365_test_00312054.jpg +Places365_test_00312056.jpg +Places365_test_00312057.jpg +Places365_test_00312061.jpg +Places365_test_00312073.jpg +Places365_test_00312078.jpg +Places365_test_00312098.jpg +Places365_test_00312112.jpg 
+Places365_test_00312113.jpg +Places365_test_00312114.jpg +Places365_test_00312124.jpg +Places365_test_00312136.jpg +Places365_test_00312140.jpg +Places365_test_00312145.jpg +Places365_test_00312205.jpg +Places365_test_00312209.jpg +Places365_test_00312212.jpg +Places365_test_00312218.jpg +Places365_test_00312231.jpg +Places365_test_00312241.jpg +Places365_test_00312250.jpg +Places365_test_00312252.jpg +Places365_test_00312254.jpg +Places365_test_00312269.jpg +Places365_test_00312273.jpg +Places365_test_00312293.jpg +Places365_test_00312300.jpg +Places365_test_00312302.jpg +Places365_test_00312307.jpg +Places365_test_00312314.jpg +Places365_test_00312316.jpg +Places365_test_00312318.jpg +Places365_test_00312322.jpg +Places365_test_00312323.jpg +Places365_test_00312352.jpg +Places365_test_00312353.jpg +Places365_test_00312354.jpg +Places365_test_00312355.jpg +Places365_test_00312381.jpg +Places365_test_00312388.jpg +Places365_test_00312430.jpg +Places365_test_00312435.jpg +Places365_test_00312440.jpg +Places365_test_00312463.jpg +Places365_test_00312492.jpg +Places365_test_00312498.jpg +Places365_test_00312508.jpg +Places365_test_00312533.jpg +Places365_test_00312583.jpg +Places365_test_00312627.jpg +Places365_test_00312641.jpg +Places365_test_00312642.jpg +Places365_test_00312654.jpg +Places365_test_00312661.jpg +Places365_test_00312681.jpg +Places365_test_00312695.jpg +Places365_test_00312700.jpg +Places365_test_00312701.jpg +Places365_test_00312704.jpg +Places365_test_00312727.jpg +Places365_test_00312734.jpg +Places365_test_00312740.jpg +Places365_test_00312749.jpg +Places365_test_00312752.jpg +Places365_test_00312756.jpg +Places365_test_00312763.jpg +Places365_test_00312765.jpg +Places365_test_00312771.jpg +Places365_test_00312777.jpg +Places365_test_00312781.jpg +Places365_test_00312803.jpg +Places365_test_00312808.jpg +Places365_test_00312816.jpg +Places365_test_00312825.jpg +Places365_test_00312833.jpg +Places365_test_00312834.jpg +Places365_test_00312835.jpg +Places365_test_00312852.jpg +Places365_test_00312859.jpg +Places365_test_00312869.jpg +Places365_test_00312895.jpg +Places365_test_00312913.jpg +Places365_test_00312926.jpg +Places365_test_00312934.jpg +Places365_test_00312961.jpg +Places365_test_00312969.jpg +Places365_test_00312973.jpg +Places365_test_00312982.jpg +Places365_test_00312987.jpg +Places365_test_00312993.jpg +Places365_test_00313005.jpg +Places365_test_00313032.jpg +Places365_test_00313035.jpg +Places365_test_00313040.jpg +Places365_test_00313043.jpg +Places365_test_00313048.jpg +Places365_test_00313062.jpg +Places365_test_00313064.jpg +Places365_test_00313079.jpg +Places365_test_00313080.jpg +Places365_test_00313099.jpg +Places365_test_00313102.jpg +Places365_test_00313115.jpg +Places365_test_00313116.jpg +Places365_test_00313127.jpg +Places365_test_00313129.jpg +Places365_test_00313138.jpg +Places365_test_00313140.jpg +Places365_test_00313150.jpg +Places365_test_00313155.jpg +Places365_test_00313161.jpg +Places365_test_00313168.jpg +Places365_test_00313179.jpg +Places365_test_00313185.jpg +Places365_test_00313189.jpg +Places365_test_00313199.jpg +Places365_test_00313206.jpg +Places365_test_00313207.jpg +Places365_test_00313218.jpg +Places365_test_00313226.jpg +Places365_test_00313233.jpg +Places365_test_00313244.jpg +Places365_test_00313278.jpg +Places365_test_00313287.jpg +Places365_test_00313288.jpg +Places365_test_00313291.jpg +Places365_test_00313292.jpg +Places365_test_00313293.jpg +Places365_test_00313304.jpg +Places365_test_00313316.jpg 
+Places365_test_00313321.jpg +Places365_test_00313333.jpg +Places365_test_00313350.jpg +Places365_test_00313362.jpg +Places365_test_00313381.jpg +Places365_test_00313383.jpg +Places365_test_00313399.jpg +Places365_test_00313400.jpg +Places365_test_00313418.jpg +Places365_test_00313420.jpg +Places365_test_00313421.jpg +Places365_test_00313442.jpg +Places365_test_00313447.jpg +Places365_test_00313449.jpg +Places365_test_00313461.jpg +Places365_test_00313462.jpg +Places365_test_00313471.jpg +Places365_test_00313479.jpg +Places365_test_00313486.jpg +Places365_test_00313492.jpg +Places365_test_00313498.jpg +Places365_test_00313512.jpg +Places365_test_00313515.jpg +Places365_test_00313520.jpg +Places365_test_00313528.jpg +Places365_test_00313541.jpg +Places365_test_00313550.jpg +Places365_test_00313595.jpg +Places365_test_00313618.jpg +Places365_test_00313619.jpg +Places365_test_00313642.jpg +Places365_test_00313676.jpg +Places365_test_00313682.jpg +Places365_test_00313686.jpg +Places365_test_00313704.jpg +Places365_test_00313746.jpg +Places365_test_00313749.jpg +Places365_test_00313786.jpg +Places365_test_00313787.jpg +Places365_test_00313814.jpg +Places365_test_00313822.jpg +Places365_test_00313848.jpg +Places365_test_00313860.jpg +Places365_test_00313861.jpg +Places365_test_00313873.jpg +Places365_test_00313883.jpg +Places365_test_00313912.jpg +Places365_test_00313956.jpg +Places365_test_00313958.jpg +Places365_test_00313997.jpg +Places365_test_00314003.jpg +Places365_test_00314024.jpg +Places365_test_00314039.jpg +Places365_test_00314041.jpg +Places365_test_00314063.jpg +Places365_test_00314068.jpg +Places365_test_00314072.jpg +Places365_test_00314075.jpg +Places365_test_00314084.jpg +Places365_test_00314116.jpg +Places365_test_00314142.jpg +Places365_test_00314168.jpg +Places365_test_00314177.jpg +Places365_test_00314178.jpg +Places365_test_00314179.jpg +Places365_test_00314184.jpg +Places365_test_00314189.jpg +Places365_test_00314215.jpg +Places365_test_00314236.jpg +Places365_test_00314246.jpg +Places365_test_00314248.jpg +Places365_test_00314258.jpg +Places365_test_00314264.jpg +Places365_test_00314270.jpg +Places365_test_00314288.jpg +Places365_test_00314294.jpg +Places365_test_00314296.jpg +Places365_test_00314307.jpg +Places365_test_00314312.jpg +Places365_test_00314325.jpg +Places365_test_00314328.jpg +Places365_test_00314344.jpg +Places365_test_00314349.jpg +Places365_test_00314381.jpg +Places365_test_00314430.jpg +Places365_test_00314454.jpg +Places365_test_00314460.jpg +Places365_test_00314473.jpg +Places365_test_00314486.jpg +Places365_test_00314514.jpg +Places365_test_00314515.jpg +Places365_test_00314516.jpg +Places365_test_00314517.jpg +Places365_test_00314518.jpg +Places365_test_00314524.jpg +Places365_test_00314544.jpg +Places365_test_00314554.jpg +Places365_test_00314562.jpg +Places365_test_00314566.jpg +Places365_test_00314569.jpg +Places365_test_00314596.jpg +Places365_test_00314606.jpg +Places365_test_00314621.jpg +Places365_test_00314627.jpg +Places365_test_00314641.jpg +Places365_test_00314656.jpg +Places365_test_00314657.jpg +Places365_test_00314690.jpg +Places365_test_00314696.jpg +Places365_test_00314723.jpg +Places365_test_00314726.jpg +Places365_test_00314742.jpg +Places365_test_00314754.jpg +Places365_test_00314759.jpg +Places365_test_00314762.jpg +Places365_test_00314778.jpg +Places365_test_00314819.jpg +Places365_test_00314827.jpg +Places365_test_00314832.jpg +Places365_test_00314835.jpg +Places365_test_00314847.jpg +Places365_test_00314852.jpg 
+Places365_test_00314890.jpg +Places365_test_00314892.jpg +Places365_test_00314913.jpg +Places365_test_00314915.jpg +Places365_test_00314922.jpg +Places365_test_00314928.jpg +Places365_test_00314936.jpg +Places365_test_00314949.jpg +Places365_test_00314958.jpg +Places365_test_00314965.jpg +Places365_test_00314974.jpg +Places365_test_00315003.jpg +Places365_test_00315006.jpg +Places365_test_00315012.jpg +Places365_test_00315020.jpg +Places365_test_00315024.jpg +Places365_test_00315032.jpg +Places365_test_00315034.jpg +Places365_test_00315039.jpg +Places365_test_00315044.jpg +Places365_test_00315077.jpg +Places365_test_00315086.jpg +Places365_test_00315089.jpg +Places365_test_00315090.jpg +Places365_test_00315103.jpg +Places365_test_00315106.jpg +Places365_test_00315117.jpg +Places365_test_00315124.jpg +Places365_test_00315134.jpg +Places365_test_00315138.jpg +Places365_test_00315141.jpg +Places365_test_00315150.jpg +Places365_test_00315154.jpg +Places365_test_00315157.jpg +Places365_test_00315170.jpg +Places365_test_00315192.jpg +Places365_test_00315239.jpg +Places365_test_00315241.jpg +Places365_test_00315250.jpg +Places365_test_00315251.jpg +Places365_test_00315254.jpg +Places365_test_00315262.jpg +Places365_test_00315264.jpg +Places365_test_00315270.jpg +Places365_test_00315274.jpg +Places365_test_00315277.jpg +Places365_test_00315282.jpg +Places365_test_00315301.jpg +Places365_test_00315307.jpg +Places365_test_00315333.jpg +Places365_test_00315334.jpg +Places365_test_00315340.jpg +Places365_test_00315341.jpg +Places365_test_00315350.jpg +Places365_test_00315361.jpg +Places365_test_00315372.jpg +Places365_test_00315421.jpg +Places365_test_00315430.jpg +Places365_test_00315441.jpg +Places365_test_00315446.jpg +Places365_test_00315453.jpg +Places365_test_00315454.jpg +Places365_test_00315457.jpg +Places365_test_00315458.jpg +Places365_test_00315462.jpg +Places365_test_00315464.jpg +Places365_test_00315467.jpg +Places365_test_00315478.jpg +Places365_test_00315490.jpg +Places365_test_00315493.jpg +Places365_test_00315495.jpg +Places365_test_00315503.jpg +Places365_test_00315529.jpg +Places365_test_00315552.jpg +Places365_test_00315566.jpg +Places365_test_00315602.jpg +Places365_test_00315604.jpg +Places365_test_00315609.jpg +Places365_test_00315612.jpg +Places365_test_00315619.jpg +Places365_test_00315632.jpg +Places365_test_00315644.jpg +Places365_test_00315653.jpg +Places365_test_00315662.jpg +Places365_test_00315670.jpg +Places365_test_00315672.jpg +Places365_test_00315692.jpg +Places365_test_00315731.jpg +Places365_test_00315740.jpg +Places365_test_00315741.jpg +Places365_test_00315765.jpg +Places365_test_00315782.jpg +Places365_test_00315801.jpg +Places365_test_00315816.jpg +Places365_test_00315829.jpg +Places365_test_00315845.jpg +Places365_test_00315897.jpg +Places365_test_00315904.jpg +Places365_test_00315907.jpg +Places365_test_00315908.jpg +Places365_test_00315909.jpg +Places365_test_00315927.jpg +Places365_test_00315958.jpg +Places365_test_00315959.jpg +Places365_test_00315961.jpg +Places365_test_00315971.jpg +Places365_test_00315976.jpg +Places365_test_00315988.jpg +Places365_test_00315993.jpg +Places365_test_00315994.jpg +Places365_test_00316001.jpg +Places365_test_00316002.jpg +Places365_test_00316003.jpg +Places365_test_00316028.jpg +Places365_test_00316030.jpg +Places365_test_00316035.jpg +Places365_test_00316036.jpg +Places365_test_00316037.jpg +Places365_test_00316073.jpg +Places365_test_00316096.jpg +Places365_test_00316104.jpg +Places365_test_00316108.jpg 
+Places365_test_00316134.jpg +Places365_test_00316168.jpg +Places365_test_00316171.jpg +Places365_test_00316189.jpg +Places365_test_00316198.jpg +Places365_test_00316206.jpg +Places365_test_00316216.jpg +Places365_test_00316217.jpg +Places365_test_00316218.jpg +Places365_test_00316221.jpg +Places365_test_00316225.jpg +Places365_test_00316232.jpg +Places365_test_00316233.jpg +Places365_test_00316236.jpg +Places365_test_00316242.jpg +Places365_test_00316243.jpg +Places365_test_00316244.jpg +Places365_test_00316254.jpg +Places365_test_00316264.jpg +Places365_test_00316269.jpg +Places365_test_00316271.jpg +Places365_test_00316273.jpg +Places365_test_00316275.jpg +Places365_test_00316283.jpg +Places365_test_00316287.jpg +Places365_test_00316288.jpg +Places365_test_00316296.jpg +Places365_test_00316298.jpg +Places365_test_00316300.jpg +Places365_test_00316307.jpg +Places365_test_00316314.jpg +Places365_test_00316315.jpg +Places365_test_00316332.jpg +Places365_test_00316337.jpg +Places365_test_00316355.jpg +Places365_test_00316361.jpg +Places365_test_00316367.jpg +Places365_test_00316404.jpg +Places365_test_00316435.jpg +Places365_test_00316438.jpg +Places365_test_00316470.jpg +Places365_test_00316475.jpg +Places365_test_00316480.jpg +Places365_test_00316536.jpg +Places365_test_00316541.jpg +Places365_test_00316561.jpg +Places365_test_00316593.jpg +Places365_test_00316631.jpg +Places365_test_00316634.jpg +Places365_test_00316644.jpg +Places365_test_00316665.jpg +Places365_test_00316671.jpg +Places365_test_00316695.jpg +Places365_test_00316698.jpg +Places365_test_00316703.jpg +Places365_test_00316709.jpg +Places365_test_00316727.jpg +Places365_test_00316728.jpg +Places365_test_00316732.jpg +Places365_test_00316757.jpg +Places365_test_00316770.jpg +Places365_test_00316792.jpg +Places365_test_00316817.jpg +Places365_test_00316821.jpg +Places365_test_00316823.jpg +Places365_test_00316835.jpg +Places365_test_00316855.jpg +Places365_test_00316862.jpg +Places365_test_00316926.jpg +Places365_test_00316931.jpg +Places365_test_00316935.jpg +Places365_test_00316936.jpg +Places365_test_00316940.jpg +Places365_test_00316942.jpg +Places365_test_00316956.jpg +Places365_test_00316960.jpg +Places365_test_00316964.jpg +Places365_test_00316973.jpg +Places365_test_00316974.jpg +Places365_test_00316986.jpg +Places365_test_00317001.jpg +Places365_test_00317006.jpg +Places365_test_00317012.jpg +Places365_test_00317020.jpg +Places365_test_00317030.jpg +Places365_test_00317056.jpg +Places365_test_00317060.jpg +Places365_test_00317062.jpg +Places365_test_00317071.jpg +Places365_test_00317079.jpg +Places365_test_00317081.jpg +Places365_test_00317085.jpg +Places365_test_00317116.jpg +Places365_test_00317155.jpg +Places365_test_00317182.jpg +Places365_test_00317190.jpg +Places365_test_00317196.jpg +Places365_test_00317219.jpg +Places365_test_00317220.jpg +Places365_test_00317224.jpg +Places365_test_00317242.jpg +Places365_test_00317260.jpg +Places365_test_00317299.jpg +Places365_test_00317342.jpg +Places365_test_00317356.jpg +Places365_test_00317366.jpg +Places365_test_00317374.jpg +Places365_test_00317375.jpg +Places365_test_00317377.jpg +Places365_test_00317390.jpg +Places365_test_00317410.jpg +Places365_test_00317412.jpg +Places365_test_00317431.jpg +Places365_test_00317443.jpg +Places365_test_00317448.jpg +Places365_test_00317451.jpg +Places365_test_00317453.jpg +Places365_test_00317472.jpg +Places365_test_00317479.jpg +Places365_test_00317480.jpg +Places365_test_00317492.jpg +Places365_test_00317528.jpg 
+Places365_test_00317531.jpg +Places365_test_00317534.jpg +Places365_test_00317539.jpg +Places365_test_00317566.jpg +Places365_test_00317596.jpg +Places365_test_00317598.jpg +Places365_test_00317603.jpg +Places365_test_00317647.jpg +Places365_test_00317650.jpg +Places365_test_00317652.jpg +Places365_test_00317657.jpg +Places365_test_00317680.jpg +Places365_test_00317682.jpg +Places365_test_00317695.jpg +Places365_test_00317718.jpg +Places365_test_00317733.jpg +Places365_test_00317735.jpg +Places365_test_00317744.jpg +Places365_test_00317758.jpg +Places365_test_00317775.jpg +Places365_test_00317781.jpg +Places365_test_00317785.jpg +Places365_test_00317786.jpg +Places365_test_00317792.jpg +Places365_test_00317833.jpg +Places365_test_00317834.jpg +Places365_test_00317843.jpg +Places365_test_00317858.jpg +Places365_test_00317864.jpg +Places365_test_00317876.jpg +Places365_test_00317880.jpg +Places365_test_00317881.jpg +Places365_test_00317889.jpg +Places365_test_00317892.jpg +Places365_test_00317913.jpg +Places365_test_00317919.jpg +Places365_test_00317922.jpg +Places365_test_00317926.jpg +Places365_test_00317948.jpg +Places365_test_00317953.jpg +Places365_test_00317958.jpg +Places365_test_00317961.jpg +Places365_test_00317965.jpg +Places365_test_00317967.jpg +Places365_test_00317986.jpg +Places365_test_00318003.jpg +Places365_test_00318005.jpg +Places365_test_00318008.jpg +Places365_test_00318095.jpg +Places365_test_00318097.jpg +Places365_test_00318099.jpg +Places365_test_00318109.jpg +Places365_test_00318114.jpg +Places365_test_00318115.jpg +Places365_test_00318121.jpg +Places365_test_00318122.jpg +Places365_test_00318130.jpg +Places365_test_00318143.jpg +Places365_test_00318155.jpg +Places365_test_00318158.jpg +Places365_test_00318162.jpg +Places365_test_00318191.jpg +Places365_test_00318200.jpg +Places365_test_00318204.jpg +Places365_test_00318221.jpg +Places365_test_00318222.jpg +Places365_test_00318242.jpg +Places365_test_00318243.jpg +Places365_test_00318247.jpg +Places365_test_00318248.jpg +Places365_test_00318265.jpg +Places365_test_00318284.jpg +Places365_test_00318289.jpg +Places365_test_00318302.jpg +Places365_test_00318338.jpg +Places365_test_00318340.jpg +Places365_test_00318341.jpg +Places365_test_00318361.jpg +Places365_test_00318367.jpg +Places365_test_00318390.jpg +Places365_test_00318414.jpg +Places365_test_00318434.jpg +Places365_test_00318448.jpg +Places365_test_00318451.jpg +Places365_test_00318458.jpg +Places365_test_00318459.jpg +Places365_test_00318467.jpg +Places365_test_00318468.jpg +Places365_test_00318471.jpg +Places365_test_00318473.jpg +Places365_test_00318481.jpg +Places365_test_00318492.jpg +Places365_test_00318513.jpg +Places365_test_00318518.jpg +Places365_test_00318542.jpg +Places365_test_00318553.jpg +Places365_test_00318557.jpg +Places365_test_00318558.jpg +Places365_test_00318560.jpg +Places365_test_00318586.jpg +Places365_test_00318588.jpg +Places365_test_00318596.jpg +Places365_test_00318599.jpg +Places365_test_00318602.jpg +Places365_test_00318603.jpg +Places365_test_00318605.jpg +Places365_test_00318615.jpg +Places365_test_00318644.jpg +Places365_test_00318651.jpg +Places365_test_00318655.jpg +Places365_test_00318670.jpg +Places365_test_00318673.jpg +Places365_test_00318675.jpg +Places365_test_00318676.jpg +Places365_test_00318701.jpg +Places365_test_00318714.jpg +Places365_test_00318719.jpg +Places365_test_00318732.jpg +Places365_test_00318739.jpg +Places365_test_00318763.jpg +Places365_test_00318769.jpg +Places365_test_00318775.jpg 
+Places365_test_00318796.jpg +Places365_test_00318798.jpg +Places365_test_00318817.jpg +Places365_test_00318819.jpg +Places365_test_00318829.jpg +Places365_test_00318837.jpg +Places365_test_00318839.jpg +Places365_test_00318851.jpg +Places365_test_00318871.jpg +Places365_test_00318896.jpg +Places365_test_00318898.jpg +Places365_test_00318912.jpg +Places365_test_00318920.jpg +Places365_test_00318930.jpg +Places365_test_00318935.jpg +Places365_test_00318954.jpg +Places365_test_00318964.jpg +Places365_test_00318975.jpg +Places365_test_00318989.jpg +Places365_test_00319035.jpg +Places365_test_00319048.jpg +Places365_test_00319053.jpg +Places365_test_00319057.jpg +Places365_test_00319058.jpg +Places365_test_00319075.jpg +Places365_test_00319081.jpg +Places365_test_00319090.jpg +Places365_test_00319096.jpg +Places365_test_00319109.jpg +Places365_test_00319115.jpg +Places365_test_00319116.jpg +Places365_test_00319121.jpg +Places365_test_00319122.jpg +Places365_test_00319137.jpg +Places365_test_00319144.jpg +Places365_test_00319159.jpg +Places365_test_00319169.jpg +Places365_test_00319184.jpg +Places365_test_00319210.jpg +Places365_test_00319214.jpg +Places365_test_00319244.jpg +Places365_test_00319252.jpg +Places365_test_00319259.jpg +Places365_test_00319268.jpg +Places365_test_00319274.jpg +Places365_test_00319277.jpg +Places365_test_00319297.jpg +Places365_test_00319306.jpg +Places365_test_00319317.jpg +Places365_test_00319331.jpg +Places365_test_00319335.jpg +Places365_test_00319355.jpg +Places365_test_00319361.jpg +Places365_test_00319402.jpg +Places365_test_00319407.jpg +Places365_test_00319414.jpg +Places365_test_00319425.jpg +Places365_test_00319443.jpg +Places365_test_00319451.jpg +Places365_test_00319462.jpg +Places365_test_00319472.jpg +Places365_test_00319481.jpg +Places365_test_00319485.jpg +Places365_test_00319495.jpg +Places365_test_00319499.jpg +Places365_test_00319502.jpg +Places365_test_00319528.jpg +Places365_test_00319533.jpg +Places365_test_00319534.jpg +Places365_test_00319537.jpg +Places365_test_00319552.jpg +Places365_test_00319570.jpg +Places365_test_00319591.jpg +Places365_test_00319630.jpg +Places365_test_00319644.jpg +Places365_test_00319650.jpg +Places365_test_00319657.jpg +Places365_test_00319659.jpg +Places365_test_00319662.jpg +Places365_test_00319667.jpg +Places365_test_00319719.jpg +Places365_test_00319731.jpg +Places365_test_00319749.jpg +Places365_test_00319751.jpg +Places365_test_00319765.jpg +Places365_test_00319766.jpg +Places365_test_00319778.jpg +Places365_test_00319796.jpg +Places365_test_00319804.jpg +Places365_test_00319811.jpg +Places365_test_00319818.jpg +Places365_test_00319825.jpg +Places365_test_00319850.jpg +Places365_test_00319860.jpg +Places365_test_00319863.jpg +Places365_test_00319865.jpg +Places365_test_00319869.jpg +Places365_test_00319892.jpg +Places365_test_00319894.jpg +Places365_test_00319915.jpg +Places365_test_00319919.jpg +Places365_test_00319921.jpg +Places365_test_00319933.jpg +Places365_test_00319959.jpg +Places365_test_00319983.jpg +Places365_test_00320005.jpg +Places365_test_00320012.jpg +Places365_test_00320013.jpg +Places365_test_00320022.jpg +Places365_test_00320028.jpg +Places365_test_00320029.jpg +Places365_test_00320058.jpg +Places365_test_00320061.jpg +Places365_test_00320063.jpg +Places365_test_00320079.jpg +Places365_test_00320094.jpg +Places365_test_00320099.jpg +Places365_test_00320115.jpg +Places365_test_00320124.jpg +Places365_test_00320129.jpg +Places365_test_00320142.jpg +Places365_test_00320143.jpg 
+Places365_test_00320159.jpg +Places365_test_00320164.jpg +Places365_test_00320171.jpg +Places365_test_00320174.jpg +Places365_test_00320187.jpg +Places365_test_00320190.jpg +Places365_test_00320192.jpg +Places365_test_00320193.jpg +Places365_test_00320199.jpg +Places365_test_00320218.jpg +Places365_test_00320232.jpg +Places365_test_00320235.jpg +Places365_test_00320241.jpg +Places365_test_00320250.jpg +Places365_test_00320259.jpg +Places365_test_00320264.jpg +Places365_test_00320271.jpg +Places365_test_00320273.jpg +Places365_test_00320290.jpg +Places365_test_00320291.jpg +Places365_test_00320300.jpg +Places365_test_00320306.jpg +Places365_test_00320308.jpg +Places365_test_00320338.jpg +Places365_test_00320345.jpg +Places365_test_00320348.jpg +Places365_test_00320378.jpg +Places365_test_00320383.jpg +Places365_test_00320389.jpg +Places365_test_00320402.jpg +Places365_test_00320451.jpg +Places365_test_00320466.jpg +Places365_test_00320473.jpg +Places365_test_00320476.jpg +Places365_test_00320478.jpg +Places365_test_00320482.jpg +Places365_test_00320490.jpg +Places365_test_00320503.jpg +Places365_test_00320505.jpg +Places365_test_00320507.jpg +Places365_test_00320518.jpg +Places365_test_00320519.jpg +Places365_test_00320524.jpg +Places365_test_00320525.jpg +Places365_test_00320526.jpg +Places365_test_00320545.jpg +Places365_test_00320574.jpg +Places365_test_00320576.jpg +Places365_test_00320586.jpg +Places365_test_00320587.jpg +Places365_test_00320596.jpg +Places365_test_00320603.jpg +Places365_test_00320607.jpg +Places365_test_00320642.jpg +Places365_test_00320647.jpg +Places365_test_00320684.jpg +Places365_test_00320687.jpg +Places365_test_00320690.jpg +Places365_test_00320692.jpg +Places365_test_00320696.jpg +Places365_test_00320702.jpg +Places365_test_00320715.jpg +Places365_test_00320751.jpg +Places365_test_00320753.jpg +Places365_test_00320755.jpg +Places365_test_00320760.jpg +Places365_test_00320763.jpg +Places365_test_00320775.jpg +Places365_test_00320780.jpg +Places365_test_00320829.jpg +Places365_test_00320832.jpg +Places365_test_00320845.jpg +Places365_test_00320850.jpg +Places365_test_00320856.jpg +Places365_test_00320864.jpg +Places365_test_00320868.jpg +Places365_test_00320877.jpg +Places365_test_00320893.jpg +Places365_test_00320900.jpg +Places365_test_00320908.jpg +Places365_test_00320921.jpg +Places365_test_00320927.jpg +Places365_test_00320930.jpg +Places365_test_00320935.jpg +Places365_test_00320954.jpg +Places365_test_00320966.jpg +Places365_test_00320978.jpg +Places365_test_00320983.jpg +Places365_test_00320986.jpg +Places365_test_00320989.jpg +Places365_test_00320992.jpg +Places365_test_00320994.jpg +Places365_test_00320996.jpg +Places365_test_00320999.jpg +Places365_test_00321017.jpg +Places365_test_00321035.jpg +Places365_test_00321042.jpg +Places365_test_00321052.jpg +Places365_test_00321054.jpg +Places365_test_00321059.jpg +Places365_test_00321063.jpg +Places365_test_00321096.jpg +Places365_test_00321140.jpg +Places365_test_00321169.jpg +Places365_test_00321174.jpg +Places365_test_00321182.jpg +Places365_test_00321197.jpg +Places365_test_00321209.jpg +Places365_test_00321214.jpg +Places365_test_00321231.jpg +Places365_test_00321233.jpg +Places365_test_00321236.jpg +Places365_test_00321243.jpg +Places365_test_00321248.jpg +Places365_test_00321250.jpg +Places365_test_00321270.jpg +Places365_test_00321273.jpg +Places365_test_00321292.jpg +Places365_test_00321300.jpg +Places365_test_00321302.jpg +Places365_test_00321304.jpg +Places365_test_00321306.jpg 
+Places365_test_00321312.jpg +Places365_test_00321327.jpg +Places365_test_00321334.jpg +Places365_test_00321335.jpg +Places365_test_00321367.jpg +Places365_test_00321374.jpg +Places365_test_00321379.jpg +Places365_test_00321381.jpg +Places365_test_00321397.jpg +Places365_test_00321417.jpg +Places365_test_00321426.jpg +Places365_test_00321430.jpg +Places365_test_00321441.jpg +Places365_test_00321462.jpg +Places365_test_00321468.jpg +Places365_test_00321469.jpg +Places365_test_00321471.jpg +Places365_test_00321477.jpg +Places365_test_00321501.jpg +Places365_test_00321503.jpg +Places365_test_00321519.jpg +Places365_test_00321544.jpg +Places365_test_00321546.jpg +Places365_test_00321556.jpg +Places365_test_00321573.jpg +Places365_test_00321593.jpg +Places365_test_00321599.jpg +Places365_test_00321624.jpg +Places365_test_00321632.jpg +Places365_test_00321655.jpg +Places365_test_00321665.jpg +Places365_test_00321669.jpg +Places365_test_00321672.jpg +Places365_test_00321675.jpg +Places365_test_00321677.jpg +Places365_test_00321686.jpg +Places365_test_00321698.jpg +Places365_test_00321700.jpg +Places365_test_00321707.jpg +Places365_test_00321716.jpg +Places365_test_00321719.jpg +Places365_test_00321731.jpg +Places365_test_00321748.jpg +Places365_test_00321762.jpg +Places365_test_00321766.jpg +Places365_test_00321770.jpg +Places365_test_00321771.jpg +Places365_test_00321801.jpg +Places365_test_00321861.jpg +Places365_test_00321883.jpg +Places365_test_00321886.jpg +Places365_test_00321900.jpg +Places365_test_00321902.jpg +Places365_test_00321916.jpg +Places365_test_00321918.jpg +Places365_test_00321932.jpg +Places365_test_00321937.jpg +Places365_test_00321957.jpg +Places365_test_00321960.jpg +Places365_test_00321962.jpg +Places365_test_00321963.jpg +Places365_test_00321980.jpg +Places365_test_00321984.jpg +Places365_test_00321997.jpg +Places365_test_00322036.jpg +Places365_test_00322047.jpg +Places365_test_00322059.jpg +Places365_test_00322068.jpg +Places365_test_00322070.jpg +Places365_test_00322073.jpg +Places365_test_00322111.jpg +Places365_test_00322118.jpg +Places365_test_00322158.jpg +Places365_test_00322159.jpg +Places365_test_00322165.jpg +Places365_test_00322177.jpg +Places365_test_00322182.jpg +Places365_test_00322190.jpg +Places365_test_00322196.jpg +Places365_test_00322215.jpg +Places365_test_00322221.jpg +Places365_test_00322253.jpg +Places365_test_00322263.jpg +Places365_test_00322267.jpg +Places365_test_00322289.jpg +Places365_test_00322312.jpg +Places365_test_00322316.jpg +Places365_test_00322323.jpg +Places365_test_00322335.jpg +Places365_test_00322362.jpg +Places365_test_00322376.jpg +Places365_test_00322401.jpg +Places365_test_00322419.jpg +Places365_test_00322462.jpg +Places365_test_00322483.jpg +Places365_test_00322500.jpg +Places365_test_00322513.jpg +Places365_test_00322524.jpg +Places365_test_00322541.jpg +Places365_test_00322550.jpg +Places365_test_00322567.jpg +Places365_test_00322572.jpg +Places365_test_00322581.jpg +Places365_test_00322588.jpg +Places365_test_00322606.jpg +Places365_test_00322609.jpg +Places365_test_00322610.jpg +Places365_test_00322623.jpg +Places365_test_00322627.jpg +Places365_test_00322636.jpg +Places365_test_00322640.jpg +Places365_test_00322642.jpg +Places365_test_00322648.jpg +Places365_test_00322662.jpg +Places365_test_00322684.jpg +Places365_test_00322692.jpg +Places365_test_00322695.jpg +Places365_test_00322737.jpg +Places365_test_00322763.jpg +Places365_test_00322775.jpg +Places365_test_00322796.jpg +Places365_test_00322816.jpg 
+Places365_test_00322825.jpg +Places365_test_00322864.jpg +Places365_test_00322868.jpg +Places365_test_00322872.jpg +Places365_test_00322878.jpg +Places365_test_00322885.jpg +Places365_test_00322892.jpg +Places365_test_00322902.jpg +Places365_test_00322918.jpg +Places365_test_00322919.jpg +Places365_test_00322921.jpg +Places365_test_00322933.jpg +Places365_test_00322934.jpg +Places365_test_00322936.jpg +Places365_test_00322938.jpg +Places365_test_00322943.jpg +Places365_test_00322953.jpg +Places365_test_00322965.jpg +Places365_test_00322993.jpg +Places365_test_00322996.jpg +Places365_test_00323000.jpg +Places365_test_00323009.jpg +Places365_test_00323012.jpg +Places365_test_00323041.jpg +Places365_test_00323049.jpg +Places365_test_00323065.jpg +Places365_test_00323070.jpg +Places365_test_00323081.jpg +Places365_test_00323083.jpg +Places365_test_00323088.jpg +Places365_test_00323094.jpg +Places365_test_00323114.jpg +Places365_test_00323123.jpg +Places365_test_00323152.jpg +Places365_test_00323175.jpg +Places365_test_00323189.jpg +Places365_test_00323207.jpg +Places365_test_00323215.jpg +Places365_test_00323226.jpg +Places365_test_00323236.jpg +Places365_test_00323255.jpg +Places365_test_00323256.jpg +Places365_test_00323258.jpg +Places365_test_00323260.jpg +Places365_test_00323285.jpg +Places365_test_00323294.jpg +Places365_test_00323296.jpg +Places365_test_00323298.jpg +Places365_test_00323315.jpg +Places365_test_00323323.jpg +Places365_test_00323333.jpg +Places365_test_00323346.jpg +Places365_test_00323358.jpg +Places365_test_00323366.jpg +Places365_test_00323376.jpg +Places365_test_00323377.jpg +Places365_test_00323381.jpg +Places365_test_00323389.jpg +Places365_test_00323392.jpg +Places365_test_00323403.jpg +Places365_test_00323404.jpg +Places365_test_00323405.jpg +Places365_test_00323415.jpg +Places365_test_00323423.jpg +Places365_test_00323461.jpg +Places365_test_00323473.jpg +Places365_test_00323495.jpg +Places365_test_00323496.jpg +Places365_test_00323539.jpg +Places365_test_00323554.jpg +Places365_test_00323565.jpg +Places365_test_00323593.jpg +Places365_test_00323594.jpg +Places365_test_00323601.jpg +Places365_test_00323606.jpg +Places365_test_00323608.jpg +Places365_test_00323623.jpg +Places365_test_00323629.jpg +Places365_test_00323639.jpg +Places365_test_00323644.jpg +Places365_test_00323659.jpg +Places365_test_00323660.jpg +Places365_test_00323675.jpg +Places365_test_00323699.jpg +Places365_test_00323704.jpg +Places365_test_00323709.jpg +Places365_test_00323717.jpg +Places365_test_00323755.jpg +Places365_test_00323762.jpg +Places365_test_00323783.jpg +Places365_test_00323788.jpg +Places365_test_00323804.jpg +Places365_test_00323826.jpg +Places365_test_00323827.jpg +Places365_test_00323836.jpg +Places365_test_00323837.jpg +Places365_test_00323848.jpg +Places365_test_00323883.jpg +Places365_test_00323884.jpg +Places365_test_00323893.jpg +Places365_test_00323900.jpg +Places365_test_00323920.jpg +Places365_test_00323923.jpg +Places365_test_00323924.jpg +Places365_test_00323933.jpg +Places365_test_00323937.jpg +Places365_test_00323966.jpg +Places365_test_00324013.jpg +Places365_test_00324043.jpg +Places365_test_00324063.jpg +Places365_test_00324070.jpg +Places365_test_00324073.jpg +Places365_test_00324080.jpg +Places365_test_00324106.jpg +Places365_test_00324123.jpg +Places365_test_00324138.jpg +Places365_test_00324140.jpg +Places365_test_00324146.jpg +Places365_test_00324148.jpg +Places365_test_00324151.jpg +Places365_test_00324158.jpg +Places365_test_00324168.jpg 
+Places365_test_00324181.jpg +Places365_test_00324194.jpg +Places365_test_00324227.jpg +Places365_test_00324238.jpg +Places365_test_00324259.jpg +Places365_test_00324272.jpg +Places365_test_00324274.jpg +Places365_test_00324293.jpg +Places365_test_00324294.jpg +Places365_test_00324300.jpg +Places365_test_00324307.jpg +Places365_test_00324308.jpg +Places365_test_00324310.jpg +Places365_test_00324315.jpg +Places365_test_00324318.jpg +Places365_test_00324330.jpg +Places365_test_00324331.jpg +Places365_test_00324336.jpg +Places365_test_00324345.jpg +Places365_test_00324363.jpg +Places365_test_00324367.jpg +Places365_test_00324373.jpg +Places365_test_00324389.jpg +Places365_test_00324394.jpg +Places365_test_00324406.jpg +Places365_test_00324419.jpg +Places365_test_00324420.jpg +Places365_test_00324448.jpg +Places365_test_00324489.jpg +Places365_test_00324491.jpg +Places365_test_00324543.jpg +Places365_test_00324546.jpg +Places365_test_00324551.jpg +Places365_test_00324554.jpg +Places365_test_00324560.jpg +Places365_test_00324585.jpg +Places365_test_00324587.jpg +Places365_test_00324600.jpg +Places365_test_00324622.jpg +Places365_test_00324623.jpg +Places365_test_00324642.jpg +Places365_test_00324654.jpg +Places365_test_00324664.jpg +Places365_test_00324702.jpg +Places365_test_00324712.jpg +Places365_test_00324724.jpg +Places365_test_00324728.jpg +Places365_test_00324749.jpg +Places365_test_00324779.jpg +Places365_test_00324781.jpg +Places365_test_00324785.jpg +Places365_test_00324798.jpg +Places365_test_00324836.jpg +Places365_test_00324840.jpg +Places365_test_00324862.jpg +Places365_test_00324863.jpg +Places365_test_00324869.jpg +Places365_test_00324908.jpg +Places365_test_00324921.jpg +Places365_test_00324948.jpg +Places365_test_00324950.jpg +Places365_test_00324970.jpg +Places365_test_00324973.jpg +Places365_test_00324995.jpg +Places365_test_00324996.jpg +Places365_test_00325018.jpg +Places365_test_00325019.jpg +Places365_test_00325025.jpg +Places365_test_00325053.jpg +Places365_test_00325072.jpg +Places365_test_00325080.jpg +Places365_test_00325084.jpg +Places365_test_00325087.jpg +Places365_test_00325089.jpg +Places365_test_00325090.jpg +Places365_test_00325100.jpg +Places365_test_00325108.jpg +Places365_test_00325109.jpg +Places365_test_00325140.jpg +Places365_test_00325175.jpg +Places365_test_00325188.jpg +Places365_test_00325190.jpg +Places365_test_00325199.jpg +Places365_test_00325206.jpg +Places365_test_00325222.jpg +Places365_test_00325236.jpg +Places365_test_00325246.jpg +Places365_test_00325265.jpg +Places365_test_00325275.jpg +Places365_test_00325277.jpg +Places365_test_00325285.jpg +Places365_test_00325300.jpg +Places365_test_00325316.jpg +Places365_test_00325320.jpg +Places365_test_00325325.jpg +Places365_test_00325350.jpg +Places365_test_00325353.jpg +Places365_test_00325354.jpg +Places365_test_00325359.jpg +Places365_test_00325362.jpg +Places365_test_00325372.jpg +Places365_test_00325382.jpg +Places365_test_00325392.jpg +Places365_test_00325396.jpg +Places365_test_00325399.jpg +Places365_test_00325418.jpg +Places365_test_00325429.jpg +Places365_test_00325436.jpg +Places365_test_00325466.jpg +Places365_test_00325473.jpg +Places365_test_00325478.jpg +Places365_test_00325494.jpg +Places365_test_00325499.jpg +Places365_test_00325500.jpg +Places365_test_00325502.jpg +Places365_test_00325523.jpg +Places365_test_00325556.jpg +Places365_test_00325570.jpg +Places365_test_00325576.jpg +Places365_test_00325599.jpg +Places365_test_00325604.jpg +Places365_test_00325635.jpg 
+Places365_test_00325648.jpg +Places365_test_00325652.jpg +Places365_test_00325656.jpg +Places365_test_00325662.jpg +Places365_test_00325683.jpg +Places365_test_00325693.jpg +Places365_test_00325695.jpg +Places365_test_00325713.jpg +Places365_test_00325725.jpg +Places365_test_00325741.jpg +Places365_test_00325743.jpg +Places365_test_00325763.jpg +Places365_test_00325775.jpg +Places365_test_00325794.jpg +Places365_test_00325802.jpg +Places365_test_00325807.jpg +Places365_test_00325808.jpg +Places365_test_00325813.jpg +Places365_test_00325824.jpg +Places365_test_00325827.jpg +Places365_test_00325832.jpg +Places365_test_00325834.jpg +Places365_test_00325839.jpg +Places365_test_00325841.jpg +Places365_test_00325864.jpg +Places365_test_00325873.jpg +Places365_test_00325893.jpg +Places365_test_00325903.jpg +Places365_test_00325912.jpg +Places365_test_00325916.jpg +Places365_test_00325924.jpg +Places365_test_00325925.jpg +Places365_test_00325929.jpg +Places365_test_00325962.jpg +Places365_test_00325963.jpg +Places365_test_00325964.jpg +Places365_test_00325973.jpg +Places365_test_00325982.jpg +Places365_test_00325985.jpg +Places365_test_00326009.jpg +Places365_test_00326013.jpg +Places365_test_00326019.jpg +Places365_test_00326026.jpg +Places365_test_00326027.jpg +Places365_test_00326028.jpg +Places365_test_00326031.jpg +Places365_test_00326035.jpg +Places365_test_00326043.jpg +Places365_test_00326051.jpg +Places365_test_00326070.jpg +Places365_test_00326073.jpg +Places365_test_00326091.jpg +Places365_test_00326099.jpg +Places365_test_00326102.jpg +Places365_test_00326116.jpg +Places365_test_00326121.jpg +Places365_test_00326152.jpg +Places365_test_00326154.jpg +Places365_test_00326171.jpg +Places365_test_00326193.jpg +Places365_test_00326197.jpg +Places365_test_00326200.jpg +Places365_test_00326210.jpg +Places365_test_00326225.jpg +Places365_test_00326230.jpg +Places365_test_00326238.jpg +Places365_test_00326242.jpg +Places365_test_00326253.jpg +Places365_test_00326257.jpg +Places365_test_00326266.jpg +Places365_test_00326268.jpg +Places365_test_00326271.jpg +Places365_test_00326312.jpg +Places365_test_00326313.jpg +Places365_test_00326323.jpg +Places365_test_00326332.jpg +Places365_test_00326339.jpg +Places365_test_00326341.jpg +Places365_test_00326342.jpg +Places365_test_00326343.jpg +Places365_test_00326352.jpg +Places365_test_00326358.jpg +Places365_test_00326361.jpg +Places365_test_00326371.jpg +Places365_test_00326385.jpg +Places365_test_00326399.jpg +Places365_test_00326412.jpg +Places365_test_00326413.jpg +Places365_test_00326440.jpg +Places365_test_00326441.jpg +Places365_test_00326451.jpg +Places365_test_00326464.jpg +Places365_test_00326484.jpg +Places365_test_00326493.jpg +Places365_test_00326501.jpg +Places365_test_00326511.jpg +Places365_test_00326514.jpg +Places365_test_00326518.jpg +Places365_test_00326522.jpg +Places365_test_00326525.jpg +Places365_test_00326539.jpg +Places365_test_00326543.jpg +Places365_test_00326566.jpg +Places365_test_00326573.jpg +Places365_test_00326583.jpg +Places365_test_00326585.jpg +Places365_test_00326597.jpg +Places365_test_00326598.jpg +Places365_test_00326621.jpg +Places365_test_00326625.jpg +Places365_test_00326636.jpg +Places365_test_00326640.jpg +Places365_test_00326654.jpg +Places365_test_00326659.jpg +Places365_test_00326686.jpg +Places365_test_00326687.jpg +Places365_test_00326691.jpg +Places365_test_00326698.jpg +Places365_test_00326704.jpg +Places365_test_00326709.jpg +Places365_test_00326725.jpg +Places365_test_00326737.jpg 
+Places365_test_00326738.jpg +Places365_test_00326742.jpg +Places365_test_00326771.jpg +Places365_test_00326772.jpg +Places365_test_00326778.jpg +Places365_test_00326791.jpg +Places365_test_00326800.jpg +Places365_test_00326802.jpg +Places365_test_00326804.jpg +Places365_test_00326813.jpg +Places365_test_00326827.jpg +Places365_test_00326837.jpg +Places365_test_00326841.jpg +Places365_test_00326882.jpg +Places365_test_00326892.jpg +Places365_test_00326920.jpg +Places365_test_00326944.jpg +Places365_test_00326948.jpg +Places365_test_00326950.jpg +Places365_test_00326955.jpg +Places365_test_00326968.jpg +Places365_test_00326994.jpg +Places365_test_00326996.jpg +Places365_test_00327037.jpg +Places365_test_00327077.jpg +Places365_test_00327110.jpg +Places365_test_00327121.jpg +Places365_test_00327132.jpg +Places365_test_00327142.jpg +Places365_test_00327143.jpg +Places365_test_00327155.jpg +Places365_test_00327157.jpg +Places365_test_00327165.jpg +Places365_test_00327195.jpg +Places365_test_00327203.jpg +Places365_test_00327211.jpg +Places365_test_00327220.jpg +Places365_test_00327224.jpg +Places365_test_00327231.jpg +Places365_test_00327244.jpg +Places365_test_00327254.jpg +Places365_test_00327265.jpg +Places365_test_00327275.jpg +Places365_test_00327281.jpg +Places365_test_00327285.jpg +Places365_test_00327291.jpg +Places365_test_00327292.jpg +Places365_test_00327293.jpg +Places365_test_00327361.jpg +Places365_test_00327368.jpg +Places365_test_00327371.jpg +Places365_test_00327392.jpg +Places365_test_00327396.jpg +Places365_test_00327412.jpg +Places365_test_00327414.jpg +Places365_test_00327421.jpg +Places365_test_00327422.jpg +Places365_test_00327434.jpg +Places365_test_00327465.jpg +Places365_test_00327468.jpg +Places365_test_00327470.jpg +Places365_test_00327472.jpg +Places365_test_00327493.jpg +Places365_test_00327499.jpg +Places365_test_00327500.jpg +Places365_test_00327509.jpg +Places365_test_00327515.jpg +Places365_test_00327538.jpg +Places365_test_00327548.jpg +Places365_test_00327549.jpg +Places365_test_00327557.jpg +Places365_test_00327566.jpg +Places365_test_00327586.jpg +Places365_test_00327620.jpg +Places365_test_00327636.jpg +Places365_test_00327675.jpg +Places365_test_00327685.jpg +Places365_test_00327686.jpg +Places365_test_00327708.jpg +Places365_test_00327721.jpg +Places365_test_00327722.jpg +Places365_test_00327728.jpg +Places365_test_00327744.jpg +Places365_test_00327746.jpg +Places365_test_00327770.jpg +Places365_test_00327781.jpg +Places365_test_00327792.jpg +Places365_test_00327795.jpg +Places365_test_00327799.jpg +Places365_test_00327814.jpg +Places365_test_00327821.jpg +Places365_test_00327828.jpg +Places365_test_00327848.jpg +Places365_test_00327853.jpg +Places365_test_00327860.jpg +Places365_test_00327877.jpg +Places365_test_00327890.jpg +Places365_test_00327894.jpg +Places365_test_00327920.jpg +Places365_test_00327928.jpg +Places365_test_00327932.jpg +Places365_test_00327938.jpg +Places365_test_00327950.jpg +Places365_test_00327952.jpg +Places365_test_00327955.jpg +Places365_test_00327957.jpg +Places365_test_00327958.jpg +Places365_test_00327965.jpg +Places365_test_00327969.jpg +Places365_test_00327976.jpg +Places365_test_00328002.jpg +Places365_test_00328012.jpg +Places365_test_00328014.jpg +Places365_test_00328041.jpg +Places365_test_00328065.jpg +Places365_test_00328076.jpg +Places365_test_00328104.jpg +Places365_test_00328106.jpg +Places365_test_00328122.jpg +Places365_test_00328131.jpg +Places365_test_00328152.jpg +Places365_test_00328157.jpg 
+Places365_test_00328161.jpg +Places365_test_00328221.jpg +Places365_test_00328230.jpg +Places365_test_00328238.jpg +Places365_test_00328241.jpg +Places365_test_00328243.jpg +Places365_test_00328256.jpg +Places365_test_00328263.jpg +Places365_test_00328295.jpg +Places365_test_00328308.jpg +Places365_test_00328316.jpg +Places365_test_00328325.jpg +Places365_test_00328326.jpg +Places365_test_00328328.jpg +Places365_test_00328334.jpg +Places365_test_00328339.jpg +Places365_test_00328342.jpg +Places365_test_00328343.jpg +Places365_test_00328344.jpg +Places365_test_00328352.jpg +Places365_test_00328354.jpg +Places365_test_00328360.jpg +Places365_test_00328369.jpg +Places365_test_00328373.jpg +Places365_test_00328389.jpg +Places365_test_00328404.jpg +Places365_test_00328424.jpg +Places365_test_00328425.jpg +Places365_test_00328466.jpg +Places365_test_00328477.jpg +Places365_test_00328482.jpg +Places365_test_00328485.jpg +Places365_test_00328486.jpg +Places365_test_00328489.jpg +Places365_test_00328499.jpg diff --git a/lama/configs/training/ablv2_work.yaml b/lama/configs/training/ablv2_work.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c2a2657af1eea35118e3fe81b3a2d7485b583891 --- /dev/null +++ b/lama/configs/training/ablv2_work.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: True + store_discr_outputs_for_vis: True + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: True + allow_scale_mask: True + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides diff --git a/lama/configs/training/ablv2_work_ffc075.yaml b/lama/configs/training/ablv2_work_ffc075.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8bea26c5d024d70d8e59c565612e15fe05b8673b --- /dev/null +++ b/lama/configs/training/ablv2_work_ffc075.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: True + store_discr_outputs_for_vis: True + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: True + allow_scale_mask: True + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - generator: ffc_resnet_075 + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides diff --git a/lama/configs/training/ablv2_work_md.yaml b/lama/configs/training/ablv2_work_md.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b8b86f093d40d45586f450515acbe06371e437b2 --- /dev/null +++ b/lama/configs/training/ablv2_work_md.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: True + store_discr_outputs_for_vis: True + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + 
gp_coef: 0.001 + mask_as_fake_target: True + allow_scale_mask: True + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - generator: pix2pixhd_multidilated_catin_4dil_9b + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final_benchmark + - hydra: overrides diff --git a/lama/configs/training/ablv2_work_no_fm.yaml b/lama/configs/training/ablv2_work_no_fm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aac1c0cfc23c211816133745c5628ace775feba6 --- /dev/null +++ b/lama/configs/training/ablv2_work_no_fm.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: True + store_discr_outputs_for_vis: True + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: True + allow_scale_mask: True + feature_matching: + weight: 0 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: mlp-mow-final + - data: abl-04-256-mh-dist + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides diff --git a/lama/configs/training/ablv2_work_no_segmpl.yaml b/lama/configs/training/ablv2_work_no_segmpl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..18b73e7f202686fbb5d01044bb840ae3e6f69724 --- /dev/null +++ b/lama/configs/training/ablv2_work_no_segmpl.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: True + store_discr_outputs_for_vis: True + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: True + allow_scale_mask: True + feature_matching: + weight: 100 + resnet_pl: + weight: 0 +# weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides diff --git a/lama/configs/training/ablv2_work_no_segmpl_csdilirpl.yaml b/lama/configs/training/ablv2_work_no_segmpl_csdilirpl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cb8075cfdddd97a288f0630d8611e8d68cec2573 --- /dev/null +++ b/lama/configs/training/ablv2_work_no_segmpl_csdilirpl.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: True + store_discr_outputs_for_vis: True + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: True + allow_scale_mask: True + feature_matching: + weight: 100 + resnet_pl: + weight: 1 + segmentation: false + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final_benchmark + - 
hydra: overrides diff --git a/lama/configs/training/ablv2_work_no_segmpl_csdilirpl_celeba_csdilirpl1_new.yaml b/lama/configs/training/ablv2_work_no_segmpl_csdilirpl_celeba_csdilirpl1_new.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26c6cac7a905b30d713b77aec72b14e3d2b1b3a2 --- /dev/null +++ b/lama/configs/training/ablv2_work_no_segmpl_csdilirpl_celeba_csdilirpl1_new.yaml @@ -0,0 +1,35 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + segm_pl: + weight: 1 + imagenet_weights: true + +defaults: + - location: celeba + - data: abl-04-256-mh-dist-celeba + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final_celeba + - hydra: overrides \ No newline at end of file diff --git a/lama/configs/training/ablv2_work_no_segmpl_csirpl.yaml b/lama/configs/training/ablv2_work_no_segmpl_csirpl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..da15b179cfbaf8465042bbbe2f4b1878d6bb69f8 --- /dev/null +++ b/lama/configs/training/ablv2_work_no_segmpl_csirpl.yaml @@ -0,0 +1,37 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: True + store_discr_outputs_for_vis: True + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: True + allow_scale_mask: True + feature_matching: + weight: 100 + resnet_pl: + weight: 0.3 + arch_encoder: 'resnet50' + segmentation: false + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides diff --git a/lama/configs/training/ablv2_work_no_segmpl_csirpl_celeba_csirpl03_new.yaml b/lama/configs/training/ablv2_work_no_segmpl_csirpl_celeba_csirpl03_new.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9061907ca975ef3bf8e6e36e6ad84e57e0f14d52 --- /dev/null +++ b/lama/configs/training/ablv2_work_no_segmpl_csirpl_celeba_csirpl03_new.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + segm_pl: + weight: 0.3 + arch_encoder: resnet50 + imagenet_weights: true + +defaults: + - location: celeba + - data: abl-04-256-mh-dist-celeba + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final_celeba + - hydra: overrides diff --git a/lama/configs/training/ablv2_work_no_segmpl_vgg.yaml b/lama/configs/training/ablv2_work_no_segmpl_vgg.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..2019ce11110ecad1559ae317f60b13cffea20762 --- /dev/null +++ b/lama/configs/training/ablv2_work_no_segmpl_vgg.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: True + store_discr_outputs_for_vis: True + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0.03 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: True + allow_scale_mask: True + feature_matching: + weight: 100 + resnet_pl: + weight: 0 +# weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides diff --git a/lama/configs/training/ablv2_work_no_segmpl_vgg_celeba_l2_vgg003_new.yaml b/lama/configs/training/ablv2_work_no_segmpl_vgg_celeba_l2_vgg003_new.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8ef6b5f8345290e6392a5ae395b9d26e55fc2031 --- /dev/null +++ b/lama/configs/training/ablv2_work_no_segmpl_vgg_celeba_l2_vgg003_new.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0.03 + kwargs: + metric: l2 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + segm_pl: + weight: 0 + +defaults: + - location: celeba + - data: abl-04-256-mh-dist-celeba + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final_celeba + - hydra: overrides diff --git a/lama/configs/training/ablv2_work_nodil_segmpl.yaml b/lama/configs/training/ablv2_work_nodil_segmpl.yaml new file mode 100644 index 0000000000000000000000000000000000000000..481e7bcee4d44a360b65c99f4749202e96ea83ac --- /dev/null +++ b/lama/configs/training/ablv2_work_nodil_segmpl.yaml @@ -0,0 +1,37 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: True + store_discr_outputs_for_vis: True + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: True + allow_scale_mask: True + feature_matching: + weight: 100 + resnet_pl: + arch_encoder: resnet50 + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides diff --git a/lama/configs/training/ablv2_work_small_holes.yaml b/lama/configs/training/ablv2_work_small_holes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8aa138bf4fc441c72c4cef70e3c834bdc2b18485 --- /dev/null +++ b/lama/configs/training/ablv2_work_small_holes.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: True + store_discr_outputs_for_vis: True + +losses: + l1: + weight_missing: 0 + weight_known: 10 + 
perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: True + allow_scale_mask: True + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-02-thin-bb + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides diff --git a/lama/configs/training/big-lama-celeba.yaml b/lama/configs/training/big-lama-celeba.yaml new file mode 100644 index 0000000000000000000000000000000000000000..08320febb545396eeb8f5e96efb5ed0e7f9ca458 --- /dev/null +++ b/lama/configs/training/big-lama-celeba.yaml @@ -0,0 +1,55 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +generator: + kind: ffc_resnet + input_nc: 4 + output_nc: 3 + ngf: 64 + n_downsampling: 3 + n_blocks: 18 + add_out_act: sigmoid + init_conv_kwargs: + ratio_gin: 0 + ratio_gout: 0 + enable_lfu: false + downsample_conv_kwargs: + ratio_gin: ${generator.init_conv_kwargs.ratio_gout} + ratio_gout: ${generator.downsample_conv_kwargs.ratio_gin} + enable_lfu: false + resnet_conv_kwargs: + ratio_gin: 0.75 + ratio_gout: ${generator.resnet_conv_kwargs.ratio_gin} + enable_lfu: false + +defaults: + - location: celeba + - data: abl-04-256-mh-dist-celeba + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final_celeba + - hydra: overrides diff --git a/lama/configs/training/big-lama-regular-celeba.yaml b/lama/configs/training/big-lama-regular-celeba.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8f6c6de07d83be1e0d9b92d5d523b96124a380af --- /dev/null +++ b/lama/configs/training/big-lama-regular-celeba.yaml @@ -0,0 +1,45 @@ +run_title: '' + +generator: + kind: pix2pixhd_global + input_nc: 4 + output_nc: 3 + ngf: 64 + n_downsampling: 3 + n_blocks: 15 + conv_kind: default + add_out_act: sigmoid + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: celeba + - data: abl-04-256-mh-dist-celeba + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final_celeba + - hydra: overrides \ No newline at end of file diff --git a/lama/configs/training/big-lama-regular.yaml b/lama/configs/training/big-lama-regular.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d23c280ff70a766ec5cfc4f1a6f9d37d8afa9f64 --- /dev/null +++ b/lama/configs/training/big-lama-regular.yaml @@ -0,0 +1,45 @@ +run_title: '' + +generator: + kind: pix2pixhd_global + input_nc: 4 + output_nc: 3 + ngf: 64 + 
n_downsampling: 3 + n_blocks: 15 + conv_kind: default + add_out_act: sigmoid + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides \ No newline at end of file diff --git a/lama/configs/training/big-lama.yaml b/lama/configs/training/big-lama.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b04f809b17b8f2aa4ea111bd34d38d881e665d42 --- /dev/null +++ b/lama/configs/training/big-lama.yaml @@ -0,0 +1,55 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +generator: + kind: ffc_resnet + input_nc: 4 + output_nc: 3 + ngf: 64 + n_downsampling: 3 + n_blocks: 18 + add_out_act: sigmoid + init_conv_kwargs: + ratio_gin: 0 + ratio_gout: 0 + enable_lfu: false + downsample_conv_kwargs: + ratio_gin: ${generator.init_conv_kwargs.ratio_gout} + ratio_gout: ${generator.downsample_conv_kwargs.ratio_gin} + enable_lfu: false + resnet_conv_kwargs: + ratio_gin: 0.75 + ratio_gout: ${generator.resnet_conv_kwargs.ratio_gin} + enable_lfu: false + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides diff --git a/lama/configs/training/data/abl-02-thin-bb.yaml b/lama/configs/training/data/abl-02-thin-bb.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4506f7bf96bca905c7af09d20bfa297998f92d94 --- /dev/null +++ b/lama/configs/training/data/abl-02-thin-bb.yaml @@ -0,0 +1,115 @@ +# @package _group_ + +# try to resemble mask generation of DeepFill v2 +# official tf version: https://github.com/JiahuiYu/generative_inpainting/blob/master/inpaint_ops.py#L168 +# pytorch version: https://github.com/zhaoyuzhi/deepfillv2/blob/62dad2c601400e14d79f4d1e090c2effcb9bf3eb/deepfillv2/dataset.py#L40 +# another unofficial pytorch version: https://github.com/avalonstrel/GatedConvolution/blob/master/config/inpaint.yml +# they are a bit different, official version has slightly larger masks + +batch_size: 10 +val_batch_size: 2 +num_workers: 3 + +train: + indir: ${location.data_root_dir}/train + out_size: 256 + + mask_gen_kwargs: # probabilities do not need to sum to 1, they are re-normalized in mask generator + irregular_proba: 1 + irregular_kwargs: + max_angle: 4 + max_len: 80 # math.sqrt(H*H+W*W) / 8 + math.sqrt(H*H+W*W) / 16 https://github.com/JiahuiYu/generative_inpainting/blob/master/inpaint_ops.py#L189 + max_width: 40 + max_times: 12 + min_times: 4 + + box_proba: 1 + box_kwargs: + margin: 0 + bbox_min_size: 30 + bbox_max_size: 128 + 
max_times: 1 + min_times: 1 + + segm_proba: 0 # not working yet due to RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the 'spawn' start method + + transform_variant: default + dataloader_kwargs: + batch_size: ${data.batch_size} + shuffle: True + num_workers: ${data.num_workers} + +val: + indir: ${location.data_root_dir}/val + img_suffix: .png + dataloader_kwargs: + batch_size: ${data.val_batch_size} + shuffle: False + num_workers: ${data.num_workers} + +#extra_val: +# random_thin_256: +# indir: ${location.data_root_dir}/extra_val/random_thin_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_medium_256: +# indir: ${location.data_root_dir}/extra_val/random_medium_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_thick_256: +# indir: ${location.data_root_dir}/extra_val/random_thick_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_thin_512: +# indir: ${location.data_root_dir}/extra_val/random_thin_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_medium_512: +# indir: ${location.data_root_dir}/extra_val/random_medium_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_thick_512: +# indir: ${location.data_root_dir}/extra_val/random_thick_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# segm_256: +# indir: ${location.data_root_dir}/extra_val/segm_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# segm_512: +# indir: ${location.data_root_dir}/extra_val/segm_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} + +visual_test: + indir: ${location.data_root_dir}/visual_test + img_suffix: _input.png + pad_out_to_modulo: 32 + dataloader_kwargs: + batch_size: 1 + shuffle: False + num_workers: ${data.num_workers} diff --git a/lama/configs/training/data/abl-04-256-mh-dist-celeba.yaml b/lama/configs/training/data/abl-04-256-mh-dist-celeba.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d19cc76b97a035a94ce8ea5095a17f218d796c0a --- /dev/null +++ b/lama/configs/training/data/abl-04-256-mh-dist-celeba.yaml @@ -0,0 +1,43 @@ +# @package _group_ + +batch_size: 5 +val_batch_size: 3 +num_workers: 3 + +train: + indir: ${location.data_root_dir}/train_256 + out_size: 256 + mask_gen_kwargs: # probabilities do not need to sum to 1, they are re-normalized in mask generator + irregular_proba: 1 + irregular_kwargs: + max_angle: 4 + max_len: 200 + max_width: 100 + max_times: 5 + min_times: 1 + + box_proba: 1 + box_kwargs: + margin: 10 + bbox_min_size: 30 + bbox_max_size: 150 + max_times: 4 + min_times: 1 + + segm_proba: 0 + + transform_variant: no_augs + dataloader_kwargs: + batch_size: ${data.batch_size} + shuffle: True + num_workers: ${data.num_workers} + +val: + indir: ${location.data_root_dir}/val_256 + img_suffix: .png + dataloader_kwargs: + batch_size: ${data.val_batch_size} + shuffle: 
False + num_workers: ${data.num_workers} + +visual_test: null diff --git a/lama/configs/training/data/abl-04-256-mh-dist-web.yaml b/lama/configs/training/data/abl-04-256-mh-dist-web.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e8e29b3e5fbfd6b9f18a1553a12720d76fd317f4 --- /dev/null +++ b/lama/configs/training/data/abl-04-256-mh-dist-web.yaml @@ -0,0 +1,110 @@ +# @package _group_ + +batch_size: 10 +val_batch_size: 2 +num_workers: 3 + +train: + kind: default_web + shuffle_buffer: 200 + indir: ${location.data_root_dir}/train_standard/part{00000..00039}.tar + out_size: 256 + mask_gen_kwargs: # probabilities do not need to sum to 1, they are re-normalized in mask generator + irregular_proba: 1 + irregular_kwargs: + max_angle: 4 + max_len: 200 + max_width: 100 + max_times: 5 + min_times: 1 + + box_proba: 1 + box_kwargs: + margin: 10 + bbox_min_size: 30 + bbox_max_size: 150 + max_times: 4 + min_times: 1 + + segm_proba: 0 + + transform_variant: distortions + dataloader_kwargs: + batch_size: ${data.batch_size} + shuffle: True + num_workers: ${data.num_workers} + +val: + indir: ${location.data_root_dir}/val + img_suffix: .png + dataloader_kwargs: + batch_size: ${data.val_batch_size} + shuffle: False + num_workers: ${data.num_workers} + +#extra_val: +# random_thin_256: +# indir: ${location.data_root_dir}/final_extra_val/random_thin_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_medium_256: +# indir: ${location.data_root_dir}/final_extra_val/random_medium_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_thick_256: +# indir: ${location.data_root_dir}/final_extra_val/random_thick_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_thin_512: +# indir: ${location.data_root_dir}/final_extra_val/random_thin_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_medium_512: +# indir: ${location.data_root_dir}/final_extra_val/random_medium_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_thick_512: +# indir: ${location.data_root_dir}/final_extra_val/random_thick_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# segm_256: +# indir: ${location.data_root_dir}/final_extra_val/segm_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# segm_512: +# indir: ${location.data_root_dir}/final_extra_val/segm_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} + +visual_test: + indir: ${location.data_root_dir}/visual_test + img_suffix: _input.png + pad_out_to_modulo: 32 + dataloader_kwargs: + batch_size: 1 + shuffle: False + num_workers: ${data.num_workers} diff --git a/lama/configs/training/data/abl-04-256-mh-dist.yaml b/lama/configs/training/data/abl-04-256-mh-dist.yaml new file mode 100644 index 0000000000000000000000000000000000000000..203e6aa07980f54aa412f930283d32d3acda6c2d --- /dev/null +++ b/lama/configs/training/data/abl-04-256-mh-dist.yaml @@ -0,0 
+1,108 @@ +# @package _group_ + +batch_size: 10 +val_batch_size: 2 +num_workers: 3 + +train: + indir: ${location.data_root_dir}/train + out_size: 256 + mask_gen_kwargs: # probabilities do not need to sum to 1, they are re-normalized in mask generator + irregular_proba: 1 + irregular_kwargs: + max_angle: 4 + max_len: 200 + max_width: 100 + max_times: 5 + min_times: 1 + + box_proba: 1 + box_kwargs: + margin: 10 + bbox_min_size: 30 + bbox_max_size: 150 + max_times: 4 + min_times: 1 + + segm_proba: 0 + + transform_variant: distortions + dataloader_kwargs: + batch_size: ${data.batch_size} + shuffle: True + num_workers: ${data.num_workers} + +val: + indir: ${location.data_root_dir}/val + img_suffix: .png + dataloader_kwargs: + batch_size: ${data.val_batch_size} + shuffle: False + num_workers: ${data.num_workers} + +#extra_val: +# random_thin_256: +# indir: ${location.data_root_dir}/extra_val/random_thin_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_medium_256: +# indir: ${location.data_root_dir}/extra_val/random_medium_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_thick_256: +# indir: ${location.data_root_dir}/extra_val/random_thick_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_thin_512: +# indir: ${location.data_root_dir}/extra_val/random_thin_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_medium_512: +# indir: ${location.data_root_dir}/extra_val/random_medium_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# random_thick_512: +# indir: ${location.data_root_dir}/extra_val/random_thick_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# segm_256: +# indir: ${location.data_root_dir}/extra_val/segm_256 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} +# segm_512: +# indir: ${location.data_root_dir}/extra_val/segm_512 +# img_suffix: .png +# dataloader_kwargs: +# batch_size: ${data.val_batch_size} +# shuffle: False +# num_workers: ${data.num_workers} + +visual_test: + indir: ${location.data_root_dir}/visual_test + img_suffix: .png + pad_out_to_modulo: 32 + dataloader_kwargs: + batch_size: 1 + shuffle: False + num_workers: ${data.num_workers} diff --git a/lama/configs/training/discriminator/pix2pixhd_nlayer.yaml b/lama/configs/training/discriminator/pix2pixhd_nlayer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df20421c579e51d3582c89d3871e6ccba79fc572 --- /dev/null +++ b/lama/configs/training/discriminator/pix2pixhd_nlayer.yaml @@ -0,0 +1,5 @@ +# @package _group_ +kind: pix2pixhd_nlayer +input_nc: 3 +ndf: 64 +n_layers: 4 diff --git a/lama/configs/training/evaluator/default_inpainted.yaml b/lama/configs/training/evaluator/default_inpainted.yaml new file mode 100644 index 0000000000000000000000000000000000000000..33bede1b85c08376c10d00ea8f15c7df8733fb09 --- /dev/null +++ b/lama/configs/training/evaluator/default_inpainted.yaml @@ -0,0 +1,4 @@ +# @package _group_ +kind: default +inpainted_key: inpainted # if you want 
to evaluate before blending with original image by mask, set predicted_image +integral_kind: ssim_fid100_f1 diff --git a/lama/configs/training/generator/ffc_resnet_075.yaml b/lama/configs/training/generator/ffc_resnet_075.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0bac88f9ece517900938c489a7e36643d55c5e62 --- /dev/null +++ b/lama/configs/training/generator/ffc_resnet_075.yaml @@ -0,0 +1,23 @@ +# @package _group_ +kind: ffc_resnet +input_nc: 4 +output_nc: 3 +ngf: 64 +n_downsampling: 3 +n_blocks: 9 +add_out_act: sigmoid + +init_conv_kwargs: + ratio_gin: 0 + ratio_gout: 0 + enable_lfu: False + +downsample_conv_kwargs: + ratio_gin: ${generator.init_conv_kwargs.ratio_gout} + ratio_gout: ${generator.downsample_conv_kwargs.ratio_gin} + enable_lfu: False + +resnet_conv_kwargs: + ratio_gin: 0.75 + ratio_gout: ${generator.resnet_conv_kwargs.ratio_gin} + enable_lfu: False diff --git a/lama/configs/training/generator/pix2pixhd_global.yaml b/lama/configs/training/generator/pix2pixhd_global.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fc3deb98c48151a37fcd3c6cffce764743be69f1 --- /dev/null +++ b/lama/configs/training/generator/pix2pixhd_global.yaml @@ -0,0 +1,8 @@ +# @package _group_ +kind: pix2pixhd_global +input_nc: 4 +output_nc: 3 +ngf: 64 +n_downsampling: 3 +n_blocks: 9 +conv_kind: default \ No newline at end of file diff --git a/lama/configs/training/generator/pix2pixhd_global_sigmoid.yaml b/lama/configs/training/generator/pix2pixhd_global_sigmoid.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5a4f007e6c47d48f1d3eb4a83307f48c94c2c08b --- /dev/null +++ b/lama/configs/training/generator/pix2pixhd_global_sigmoid.yaml @@ -0,0 +1,9 @@ +# @package _group_ +kind: pix2pixhd_global +input_nc: 4 +output_nc: 3 +ngf: 64 +n_downsampling: 3 +n_blocks: 9 +conv_kind: default +add_out_act: sigmoid diff --git a/lama/configs/training/generator/pix2pixhd_multidilated_catin_4dil_9b.yaml b/lama/configs/training/generator/pix2pixhd_multidilated_catin_4dil_9b.yaml new file mode 100644 index 0000000000000000000000000000000000000000..28d10a567db87aacc2d9a0898383b383919880c9 --- /dev/null +++ b/lama/configs/training/generator/pix2pixhd_multidilated_catin_4dil_9b.yaml @@ -0,0 +1,12 @@ +# @package _group_ +kind: pix2pixhd_multidilated +input_nc: 4 +output_nc: 3 +ngf: 64 +n_downsampling: 3 +n_blocks: 9 +conv_kind: default +add_out_act: sigmoid +multidilation_kwargs: + comb_mode: cat_in + dilation_num: 4 diff --git a/lama/configs/training/hydra/no_time.yaml b/lama/configs/training/hydra/no_time.yaml new file mode 100644 index 0000000000000000000000000000000000000000..37ed30d351f4367b46b96463ea9dfee46bb11c19 --- /dev/null +++ b/lama/configs/training/hydra/no_time.yaml @@ -0,0 +1,6 @@ +# @package _group_ +run: + dir: ${location.out_root_dir}/${env:USER}_${hydra:job.name}_${hydra:job.config_name}_${run_title} +sweep: + dir: ${hydra:run.dir}_sweep + subdir: ${hydra.job.num} diff --git a/lama/configs/training/hydra/overrides.yaml b/lama/configs/training/hydra/overrides.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e9e53f2ca81fde6b9584e4aa4583b23464914c9 --- /dev/null +++ b/lama/configs/training/hydra/overrides.yaml @@ -0,0 +1,6 @@ +# @package _group_ +run: + dir: ${location.out_root_dir}/${env:USER}_${now:%Y-%m-%d_%H-%M-%S}_${hydra:job.name}_${hydra:job.config_name}_${run_title} +sweep: + dir: ${hydra:run.dir}_sweep + subdir: ${hydra.job.num} diff --git a/lama/configs/training/lama-fourier-celeba.yaml 
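Note on how the config groups above compose (a minimal sketch, not part of this diff): every group file starts with "# @package _group_", so Hydra mounts it under its group name (generator, data, discriminator, trainer, ...) when it resolves the defaults: list of a training preset, and interpolations such as ${generator.init_conv_kwargs.ratio_gout} are therefore absolute paths into the merged config. The snippet below reproduces that resolution with plain OmegaConf; the training entry point itself (e.g. bin/train.py) is not included in this diff and is only assumed here.

    # Minimal sketch, assuming `omegaconf` is installed and the repo is checked out
    # so that the path below exists; this is not the repo's own loading code.
    import os
    from omegaconf import OmegaConf

    gen = OmegaConf.load("lama/configs/training/generator/ffc_resnet_075.yaml")
    # Mount the group under "generator", mirroring the "# @package _group_" directive.
    cfg = OmegaConf.create({"generator": gen})

    print(cfg.generator.downsample_conv_kwargs.ratio_gin)  # 0    (follows init_conv_kwargs.ratio_gout)
    print(cfg.generator.resnet_conv_kwargs.ratio_gout)     # 0.75 (follows resnet_conv_kwargs.ratio_gin)

    # ${env:TORCH_HOME} in the loss configs is resolved by Hydra's legacy "env" resolver;
    # outside a Hydra run you would register an equivalent resolver yourself:
    OmegaConf.register_new_resolver("env", lambda name: os.environ.get(name, ""))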
b/lama/configs/training/lama-fourier-celeba.yaml new file mode 100644 index 0000000000000000000000000000000000000000..63e562487be733afa2ae37b034f738635dffa4ad --- /dev/null +++ b/lama/configs/training/lama-fourier-celeba.yaml @@ -0,0 +1,35 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: celeba + - data: abl-04-256-mh-dist-celeba + - generator: ffc_resnet_075 + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final_celeba + - hydra: overrides \ No newline at end of file diff --git a/lama/configs/training/lama-fourier.yaml b/lama/configs/training/lama-fourier.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c8d3a92ebc8906d25df735f1438f04d6efc48fb --- /dev/null +++ b/lama/configs/training/lama-fourier.yaml @@ -0,0 +1,35 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - generator: ffc_resnet_075 + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides \ No newline at end of file diff --git a/lama/configs/training/lama-regular-celeba.yaml b/lama/configs/training/lama-regular-celeba.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dd13ecc9cd4d4271cf62c85cc87b18a8bf9e6129 --- /dev/null +++ b/lama/configs/training/lama-regular-celeba.yaml @@ -0,0 +1,35 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: celeba + - data: abl-04-256-mh-dist-celeba + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final_celeba + - hydra: overrides diff --git a/lama/configs/training/lama-regular.yaml b/lama/configs/training/lama-regular.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0412c4aafe292f7ec6c8ce284c6803d679620293 --- /dev/null +++ b/lama/configs/training/lama-regular.yaml @@ -0,0 +1,35 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: 
r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-04-256-mh-dist + - generator: pix2pixhd_global_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides diff --git a/lama/configs/training/lama_small_train_masks.yaml b/lama/configs/training/lama_small_train_masks.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bf15faa5d8fca8c442dcc652ed6348cd1c4e5ca4 --- /dev/null +++ b/lama/configs/training/lama_small_train_masks.yaml @@ -0,0 +1,36 @@ +run_title: '' + +training_model: + kind: default + visualize_each_iters: 1000 + concat_mask: true + store_discr_outputs_for_vis: true + +losses: + l1: + weight_missing: 0 + weight_known: 10 + perceptual: + weight: 0 + adversarial: + kind: r1 + weight: 10 + gp_coef: 0.001 + mask_as_fake_target: true + allow_scale_mask: true + feature_matching: + weight: 100 + resnet_pl: + weight: 30 + weights_path: ${env:TORCH_HOME} + +defaults: + - location: docker + - data: abl-02-thin-bb + - generator: pix2pixhd_sigmoid + - discriminator: pix2pixhd_nlayer + - optimizers: default_optimizers + - visualizer: directory + - evaluator: default_inpainted + - trainer: any_gpu_large_ssim_ddp_final + - hydra: overrides \ No newline at end of file diff --git a/lama/configs/training/location/celeba_example.yaml b/lama/configs/training/location/celeba_example.yaml new file mode 100644 index 0000000000000000000000000000000000000000..117fe8a9324d1de5908bdac175985837d10abc75 --- /dev/null +++ b/lama/configs/training/location/celeba_example.yaml @@ -0,0 +1,5 @@ +# @package _group_ +data_root_dir: /home/user/lama/celeba-hq-dataset/ +out_root_dir: /home/user/lama/experiments/ +tb_dir: /home/user/lama/tb_logs/ +pretrained_models: /home/user/lama/ diff --git a/lama/configs/training/location/docker.yaml b/lama/configs/training/location/docker.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5da6a4a452c5e63f6548b60f18861cf66ab738ff --- /dev/null +++ b/lama/configs/training/location/docker.yaml @@ -0,0 +1,5 @@ +# @package _group_ +data_root_dir: /data/data +out_root_dir: /data/experiments +tb_dir: /data/tb_logs +pretrained_models: /some_path diff --git a/lama/configs/training/location/places_example.yaml b/lama/configs/training/location/places_example.yaml new file mode 100644 index 0000000000000000000000000000000000000000..97a9f9b58c7f2fd5ee0b6b50da6a366d91227a15 --- /dev/null +++ b/lama/configs/training/location/places_example.yaml @@ -0,0 +1,5 @@ +# @package _group_ +data_root_dir: /home/user/inpainting-lama/places_standard_dataset/ +out_root_dir: /home/user/inpainting-lama/experiments +tb_dir: /home/user/inpainting-lama/tb_logs +pretrained_models: /home/user/inpainting-lama/ diff --git a/lama/configs/training/optimizers/default_optimizers.yaml b/lama/configs/training/optimizers/default_optimizers.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6d85d1da74c2e30ea3378ed8e0ae43407767c02d --- /dev/null +++ b/lama/configs/training/optimizers/default_optimizers.yaml @@ -0,0 +1,7 @@ +# @package _group_ +generator: + kind: adam + lr: 0.001 +discriminator: + kind: adam + lr: 0.0001 diff --git a/lama/configs/training/trainer/any_gpu_large_ssim_ddp_final.yaml 
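The optimizer group above only names a kind and a learning rate per network; a hypothetical factory like the one below is how such a config is typically turned into torch optimizers. The real factory lives in the LaMa training code, which is not part of this diff, so the helper name and signature here are assumptions.

    # Hypothetical helper; `kind`/`lr` mirror default_optimizers.yaml, everything else is assumed.
    import torch

    def make_optimizer(parameters, kind: str, **kwargs):
        if kind == "adam":
            return torch.optim.Adam(parameters, **kwargs)
        raise ValueError(f"unknown optimizer kind: {kind}")

    # generator:     make_optimizer(generator.parameters(), "adam", lr=0.001)
    # discriminator: make_optimizer(discriminator.parameters(), "adam", lr=0.0001)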
b/lama/configs/training/trainer/any_gpu_large_ssim_ddp_final.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5da9ed3fb8946cb82d5c4e6dba6f882a929619bc --- /dev/null +++ b/lama/configs/training/trainer/any_gpu_large_ssim_ddp_final.yaml @@ -0,0 +1,31 @@ +# @package _group_ +kwargs: + gpus: -1 + accelerator: ddp + max_epochs: 40 + gradient_clip_val: 1 + log_gpu_memory: None # set to min_max or all for debug + limit_train_batches: 25000 + val_check_interval: ${trainer.kwargs.limit_train_batches} + # fast_dev_run: True # uncomment for faster debug + # track_grad_norm: 2 # uncomment to track L2 gradients norm + log_every_n_steps: 250 + precision: 32 +# precision: 16 +# amp_backend: native +# amp_level: O1 + # resume_from_checkpoint: path # override via command line trainer.resume_from_checkpoint=path_to_checkpoint + terminate_on_nan: False + # auto_scale_batch_size: True # uncomment to find largest batch size + check_val_every_n_epoch: 1 + num_sanity_val_steps: 8 +# limit_val_batches: 1000000 + replace_sampler_ddp: False + +checkpoint_kwargs: + verbose: True + save_top_k: 5 + save_last: True + period: 1 + monitor: val_ssim_fid100_f1_total_mean + mode: max \ No newline at end of file diff --git a/lama/configs/training/trainer/any_gpu_large_ssim_ddp_final_benchmark.yaml b/lama/configs/training/trainer/any_gpu_large_ssim_ddp_final_benchmark.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cd72e41fe97b2233bd54be5de2973b6030459b29 --- /dev/null +++ b/lama/configs/training/trainer/any_gpu_large_ssim_ddp_final_benchmark.yaml @@ -0,0 +1,32 @@ +# @package _group_ +kwargs: + gpus: -1 + accelerator: ddp + max_epochs: 40 + gradient_clip_val: 1 + log_gpu_memory: None # set to min_max or all for debug + limit_train_batches: 25000 + val_check_interval: ${trainer.kwargs.limit_train_batches} + # fast_dev_run: True # uncomment for faster debug + # track_grad_norm: 2 # uncomment to track L2 gradients norm + log_every_n_steps: 250 + precision: 32 +# precision: 16 +# amp_backend: native +# amp_level: O1 + # resume_from_checkpoint: path # override via command line trainer.resume_from_checkpoint=path_to_checkpoint + terminate_on_nan: False + # auto_scale_batch_size: True # uncomment to find largest batch size + check_val_every_n_epoch: 1 + num_sanity_val_steps: 8 +# limit_val_batches: 1000000 + replace_sampler_ddp: False + benchmark: True + +checkpoint_kwargs: + verbose: True + save_top_k: 5 + save_last: True + period: 1 + monitor: val_ssim_fid100_f1_total_mean + mode: max diff --git a/lama/configs/training/trainer/any_gpu_large_ssim_ddp_final_celeba.yaml b/lama/configs/training/trainer/any_gpu_large_ssim_ddp_final_celeba.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c175d006e9206449170ab0aed727bbb9a1034c08 --- /dev/null +++ b/lama/configs/training/trainer/any_gpu_large_ssim_ddp_final_celeba.yaml @@ -0,0 +1,22 @@ +# @package _group_ +kwargs: + gpus: -1 + accelerator: ddp + max_epochs: 40 + gradient_clip_val: 1 + log_gpu_memory: None + limit_train_batches: 25000 + val_check_interval: 2600 + log_every_n_steps: 250 + precision: 32 + terminate_on_nan: False + check_val_every_n_epoch: 1 + num_sanity_val_steps: 8 + replace_sampler_ddp: False +checkpoint_kwargs: + verbose: True + save_top_k: 5 + save_last: True + period: 1 + monitor: val_ssim_fid100_f1_total_mean + mode: max \ No newline at end of file diff --git a/lama/configs/training/visualizer/directory.yaml b/lama/configs/training/visualizer/directory.yaml new file mode 100644 index 
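The trainer group above is a bag of pytorch-lightning Trainer keyword arguments plus ModelCheckpoint settings keyed on the SSIM/FID-based validation metric. A minimal sketch of how it might be consumed is below; it assumes an older pytorch-lightning 1.x release (kwargs such as period, terminate_on_nan and accelerator: ddp were removed or renamed in later versions) and stands in for an entry point that is not included in this diff.

    # Minimal sketch, assuming an older pytorch-lightning 1.x and omegaconf;
    # not the repo's actual training entry point.
    import pytorch_lightning as pl
    from omegaconf import OmegaConf

    cfg = OmegaConf.load("lama/configs/training/trainer/any_gpu_large_ssim_ddp_final_celeba.yaml")
    # Keep the 5 best checkpoints by val_ssim_fid100_f1_total_mean, plus the last one.
    checkpoint_cb = pl.callbacks.ModelCheckpoint(**cfg.checkpoint_kwargs)
    trainer = pl.Trainer(callbacks=[checkpoint_cb], **cfg.kwargs)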
0000000000000000000000000000000000000000..ff1880d4e033bd6a5709d837f84e60b33b18254c --- /dev/null +++ b/lama/configs/training/visualizer/directory.yaml @@ -0,0 +1,12 @@ +# @package _group_ +kind: directory +outdir: samples +key_order: + - image + - predicted_image + - discr_output_fake + - discr_output_real + - inpainted +rescale_keys: + - discr_output_fake + - discr_output_real diff --git a/lama/fetch_data/celebahq_dataset_prepare.sh b/lama/fetch_data/celebahq_dataset_prepare.sh new file mode 100644 index 0000000000000000000000000000000000000000..6d2ba9a6265c0d5fa580035952a1f568dd8d9e44 --- /dev/null +++ b/lama/fetch_data/celebahq_dataset_prepare.sh @@ -0,0 +1,37 @@ +mkdir celeba-hq-dataset + +unzip data256x256.zip -d celeba-hq-dataset/ + +# Reindex +for i in `echo {00001..30000}` +do + mv 'celeba-hq-dataset/data256x256/'$i'.jpg' 'celeba-hq-dataset/data256x256/'$[10#$i - 1]'.jpg' +done + + +# Split: split train -> train & val +cat fetch_data/train_shuffled.flist | shuf > celeba-hq-dataset/temp_train_shuffled.flist +cat celeba-hq-dataset/temp_train_shuffled.flist | head -n 2000 > celeba-hq-dataset/val_shuffled.flist +cat celeba-hq-dataset/temp_train_shuffled.flist | tail -n +2001 > celeba-hq-dataset/train_shuffled.flist +cat fetch_data/val_shuffled.flist > celeba-hq-dataset/visual_test_shuffled.flist + +mkdir celeba-hq-dataset/train_256/ +mkdir celeba-hq-dataset/val_source_256/ +mkdir celeba-hq-dataset/visual_test_source_256/ + +cat celeba-hq-dataset/train_shuffled.flist | xargs -I {} mv celeba-hq-dataset/data256x256/{} celeba-hq-dataset/train_256/ +cat celeba-hq-dataset/val_shuffled.flist | xargs -I {} mv celeba-hq-dataset/data256x256/{} celeba-hq-dataset/val_source_256/ +cat celeba-hq-dataset/visual_test_shuffled.flist | xargs -I {} mv celeba-hq-dataset/data256x256/{} celeba-hq-dataset/visual_test_source_256/ + + +# create location config celeba.yaml +PWD=$(pwd) +DATASET=${PWD}/celeba-hq-dataset +CELEBA=${PWD}/configs/training/location/celeba.yaml + +touch $CELEBA +echo "# @package _group_" >> $CELEBA +echo "data_root_dir: ${DATASET}/" >> $CELEBA +echo "out_root_dir: ${PWD}/experiments/" >> $CELEBA +echo "tb_dir: ${PWD}/tb_logs/" >> $CELEBA +echo "pretrained_models: ${PWD}/" >> $CELEBA diff --git a/lama/fetch_data/celebahq_gen_masks.sh b/lama/fetch_data/celebahq_gen_masks.sh new file mode 100644 index 0000000000000000000000000000000000000000..190ccfd53038711df34d402ecf1ee729a7c1e254 --- /dev/null +++ b/lama/fetch_data/celebahq_gen_masks.sh @@ -0,0 +1,29 @@ +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_thick_256.yaml \ +celeba-hq-dataset/val_source_256/ \ +celeba-hq-dataset/val_256/random_thick_256/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_thin_256.yaml \ +celeba-hq-dataset/val_source_256/ \ +celeba-hq-dataset/val_256/random_thin_256/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_medium_256.yaml \ +celeba-hq-dataset/val_source_256/ \ +celeba-hq-dataset/val_256/random_medium_256/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_thick_256.yaml \ +celeba-hq-dataset/visual_test_source_256/ \ +celeba-hq-dataset/visual_test_256/random_thick_256/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_thin_256.yaml \ +celeba-hq-dataset/visual_test_source_256/ \ +celeba-hq-dataset/visual_test_256/random_thin_256/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_medium_256.yaml \ +celeba-hq-dataset/visual_test_source_256/ \ 
+celeba-hq-dataset/visual_test_256/random_medium_256/ diff --git a/lama/fetch_data/eval_sampler.py b/lama/fetch_data/eval_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..7cffdbc969e3f5d5f18f589c29f70abd240f3986 --- /dev/null +++ b/lama/fetch_data/eval_sampler.py @@ -0,0 +1,20 @@ +import os +import random + +val_files_path = os.path.abspath('.') + '/places_standard_dataset/original/val/' +list_of_random_val_files = os.path.abspath('.') + '/places_standard_dataset/original/eval_random_files.txt' +val_files = [val_files_path + image for image in os.listdir(val_files_path)] + +print(f'Sampling 30000 images out of {len(val_files)} images in {val_files_path}' + \ + f'and put their paths to {list_of_random_val_files}') + +print('In our paper we evaluate trained models on these 30k sampled (mask,image) pairs in our paper (check Sup. mat.)') + +random.shuffle(val_files) +val_files_random = val_files[0:30000] + +with open(list_of_random_val_files, 'w') as fw: + for filename in val_files_random: + fw.write(filename+'\n') +print('...done') + diff --git a/lama/fetch_data/places_challenge_train_download.sh b/lama/fetch_data/places_challenge_train_download.sh new file mode 100755 index 0000000000000000000000000000000000000000..f5317b44d16a2f295a56a52d1ce005605a137be7 --- /dev/null +++ b/lama/fetch_data/places_challenge_train_download.sh @@ -0,0 +1,14 @@ +mkdir places_challenge_dataset + + +declare -a TARPARTS +for i in {a..z} +do + TARPARTS[${#TARPARTS[@]}]="http://data.csail.mit.edu/places/places365/train_large_split/${i}.tar" +done +ls +printf "%s\n" "${TARPARTS[@]}" > places_challenge_dataset/places365_train.txt + +cd places_challenge_dataset/ +xargs -a places365_train.txt -n 1 -P 8 wget [...] +ls *.tar | xargs -i tar xvf {} diff --git a/lama/fetch_data/places_standard_evaluation_prepare_data.sh b/lama/fetch_data/places_standard_evaluation_prepare_data.sh new file mode 100755 index 0000000000000000000000000000000000000000..2962ac8c843c84a467679887cb4aab60bd73917a --- /dev/null +++ b/lama/fetch_data/places_standard_evaluation_prepare_data.sh @@ -0,0 +1,52 @@ +# 0. folder preparation +mkdir -p places_standard_dataset/evaluation/hires/ +mkdir -p places_standard_dataset/evaluation/random_thick_512/ +mkdir -p places_standard_dataset/evaluation/random_thin_512/ +mkdir -p places_standard_dataset/evaluation/random_medium_512/ +mkdir -p places_standard_dataset/evaluation/random_thick_256/ +mkdir -p places_standard_dataset/evaluation/random_thin_256/ +mkdir -p places_standard_dataset/evaluation/random_medium_256/ + +# 1. sample 30000 new images +OUT=$(python3 fetch_data/eval_sampler.py) +echo ${OUT} + +FILELIST=$(cat places_standard_dataset/original/eval_random_files.txt) +for i in $FILELIST +do + $(cp ${i} places_standard_dataset/evaluation/hires/) +done + + +# 2. 
generate all kinds of masks + +# all 512 +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_thick_512.yaml \ +places_standard_dataset/evaluation/hires \ +places_standard_dataset/evaluation/random_thick_512/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_thin_512.yaml \ +places_standard_dataset/evaluation/hires \ +places_standard_dataset/evaluation/random_thin_512/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_medium_512.yaml \ +places_standard_dataset/evaluation/hires \ +places_standard_dataset/evaluation/random_medium_512/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_thick_256.yaml \ +places_standard_dataset/evaluation/hires \ +places_standard_dataset/evaluation/random_thick_256/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_thin_256.yaml \ +places_standard_dataset/evaluation/hires \ +places_standard_dataset/evaluation/random_thin_256/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_medium_256.yaml \ +places_standard_dataset/evaluation/hires \ +places_standard_dataset/evaluation/random_medium_256/ diff --git a/lama/fetch_data/places_standard_test_val_gen_masks.sh b/lama/fetch_data/places_standard_test_val_gen_masks.sh new file mode 100755 index 0000000000000000000000000000000000000000..4654779790564f4aba73fa1629ca6899697ad150 --- /dev/null +++ b/lama/fetch_data/places_standard_test_val_gen_masks.sh @@ -0,0 +1,13 @@ +mkdir -p places_standard_dataset/val/ +mkdir -p places_standard_dataset/visual_test/ + + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_thick_512.yaml \ +places_standard_dataset/val_hires/ \ +places_standard_dataset/val/ + +python3 bin/gen_mask_dataset.py \ +$(pwd)/configs/data_gen/random_thick_512.yaml \ +places_standard_dataset/visual_test_hires/ \ +places_standard_dataset/visual_test/ \ No newline at end of file diff --git a/lama/fetch_data/places_standard_test_val_prepare.sh b/lama/fetch_data/places_standard_test_val_prepare.sh new file mode 100755 index 0000000000000000000000000000000000000000..c0aa15008463c9fb881e0255c45994394a515806 --- /dev/null +++ b/lama/fetch_data/places_standard_test_val_prepare.sh @@ -0,0 +1,5 @@ +mkdir -p places_standard_dataset/original/test/ +tar -xvf test_large.tar -C places_standard_dataset/original/test/ + +mkdir -p places_standard_dataset/original/val/ +tar -xvf val_large.tar -C places_standard_dataset/original/val/ diff --git a/lama/fetch_data/places_standard_test_val_sample.sh b/lama/fetch_data/places_standard_test_val_sample.sh new file mode 100755 index 0000000000000000000000000000000000000000..7b581f457e32e339d7a480845de27d37d0171322 --- /dev/null +++ b/lama/fetch_data/places_standard_test_val_sample.sh @@ -0,0 +1,22 @@ +mkdir -p places_standard_dataset/val_hires/ +mkdir -p places_standard_dataset/visual_test_hires/ + + +# randomly sample images for test and vis +OUT=$(python3 fetch_data/sampler.py) +echo ${OUT} + +FILELIST=$(cat places_standard_dataset/original/test_random_files.txt) + +for i in $FILELIST +do + $(cp ${i} places_standard_dataset/val_hires/) +done + +FILELIST=$(cat places_standard_dataset/original/val_random_files.txt) + +for i in $FILELIST +do + $(cp ${i} places_standard_dataset/visual_test_hires/) +done + diff --git a/lama/fetch_data/places_standard_train_prepare.sh b/lama/fetch_data/places_standard_train_prepare.sh new file mode 100644 index 0000000000000000000000000000000000000000..aaf429243c5b05c9e3319b01842992cb2ab4c06c --- /dev/null +++ 
b/lama/fetch_data/places_standard_train_prepare.sh @@ -0,0 +1,16 @@ +mkdir -p places_standard_dataset/train + +# untar without folder structure +tar -xvf train_large_places365standard.tar -C places_standard_dataset/train + +# create location config places.yaml +PWD=$(pwd) +DATASET=${PWD}/places_standard_dataset +PLACES=${PWD}/configs/training/location/places_standard.yaml + +touch $PLACES +echo "# @package _group_" >> $PLACES +echo "data_root_dir: ${DATASET}/" >> $PLACES +echo "out_root_dir: ${PWD}/experiments/" >> $PLACES +echo "tb_dir: ${PWD}/tb_logs/" >> $PLACES +echo "pretrained_models: ${PWD}/" >> $PLACES diff --git a/lama/fetch_data/sampler.py b/lama/fetch_data/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..9bf48b7fab615d404be55cda5c645bf91ad65d18 --- /dev/null +++ b/lama/fetch_data/sampler.py @@ -0,0 +1,42 @@ +import os +import random + +test_files_path = os.path.abspath('.') + '/places_standard_dataset/original/test/' +list_of_random_test_files = os.path.abspath('.') + '/places_standard_dataset/original/test_random_files.txt' + +test_files = [ + test_files_path + image for image in os.listdir(test_files_path) +] + +print(f'Sampling 2000 images out of {len(test_files)} images in {test_files_path}' + \ + f'and put their paths to {list_of_random_test_files}') +print('Our training procedure will pick best checkpoints according to metrics, computed on these images.') + +random.shuffle(test_files) +test_files_random = test_files[0:2000] +with open(list_of_random_test_files, 'w') as fw: + for filename in test_files_random: + fw.write(filename+'\n') +print('...done') + + +# -------------------------------- + +val_files_path = os.path.abspath('.') + '/places_standard_dataset/original/val/' +list_of_random_val_files = os.path.abspath('.') + '/places_standard_dataset/original/val_random_files.txt' + +val_files = [ + val_files_path + image for image in os.listdir(val_files_path) +] + +print(f'Sampling 100 images out of {len(val_files)} in {val_files_path} ' + \ + f'and put their paths to {list_of_random_val_files}') +print('We use these images for visual check up of evolution of inpainting algorithm epoch to epoch' ) + +random.shuffle(val_files) +val_files_random = val_files[0:100] +with open(list_of_random_val_files, 'w') as fw: + for filename in val_files_random: + fw.write(filename+'\n') +print('...done') + diff --git a/lama/fetch_data/train_shuffled.flist b/lama/fetch_data/train_shuffled.flist new file mode 100644 index 0000000000000000000000000000000000000000..240211cbb14cfa0418778a814ce4b578940c676b --- /dev/null +++ b/lama/fetch_data/train_shuffled.flist @@ -0,0 +1,28000 @@ +27049.jpg +17547.jpg +23248.jpg +29613.jpg +7055.jpg +21404.jpg +8928.jpg +3579.jpg +10811.jpg +14556.jpg +15131.jpg +15634.jpg +15805.jpg +1043.jpg +22433.jpg +14652.jpg +15942.jpg +16587.jpg +7641.jpg +4943.jpg +26975.jpg +15746.jpg +5382.jpg +23459.jpg +24104.jpg +6964.jpg +12555.jpg +11762.jpg +11977.jpg +14251.jpg +29810.jpg +28323.jpg +849.jpg +20543.jpg +44.jpg +9347.jpg +28557.jpg +28344.jpg +8645.jpg +25718.jpg +7276.jpg +12631.jpg +6590.jpg +16221.jpg +27425.jpg +11434.jpg +4346.jpg +5436.jpg +6978.jpg +24833.jpg +16268.jpg +16593.jpg +3219.jpg +20812.jpg +12628.jpg +14987.jpg +5583.jpg +23479.jpg +17235.jpg +24650.jpg +23115.jpg +2773.jpg +3116.jpg +8759.jpg +22297.jpg +3471.jpg +27254.jpg +28922.jpg +29154.jpg +13172.jpg +11186.jpg +28396.jpg +9016.jpg +568.jpg +1777.jpg +10695.jpg +10164.jpg +10571.jpg +5349.jpg +13215.jpg +13390.jpg +4166.jpg +29336.jpg +2024.jpg 
+20913.jpg +27210.jpg +4701.jpg +4854.jpg +1485.jpg +17527.jpg +14392.jpg +26456.jpg +28991.jpg +13485.jpg +18078.jpg +13364.jpg +24403.jpg +6121.jpg +4906.jpg +14398.jpg +16473.jpg +22690.jpg +5158.jpg +15334.jpg +16997.jpg +21671.jpg +6889.jpg +7961.jpg +26533.jpg +15912.jpg +1192.jpg +24039.jpg +19974.jpg +21681.jpg +28138.jpg +3931.jpg +28422.jpg +21992.jpg +24027.jpg +13693.jpg +15981.jpg +10526.jpg +22912.jpg +4532.jpg +9729.jpg +25346.jpg +28048.jpg +14376.jpg +8079.jpg +5498.jpg +11225.jpg +22147.jpg +22730.jpg +19218.jpg +11602.jpg +14810.jpg +10555.jpg +6838.jpg +13727.jpg +5077.jpg +21958.jpg +10682.jpg +28509.jpg +26434.jpg +2965.jpg +28477.jpg +4452.jpg +18846.jpg +25066.jpg +20679.jpg +13310.jpg +9574.jpg +28880.jpg +9806.jpg +3154.jpg +13916.jpg +18807.jpg +26890.jpg +21990.jpg +6871.jpg +27561.jpg +19997.jpg +15729.jpg +26522.jpg +23355.jpg +9875.jpg +16077.jpg +28500.jpg +29541.jpg +27219.jpg +22021.jpg +798.jpg +9482.jpg +4149.jpg +5193.jpg +27739.jpg +12289.jpg +19934.jpg +27730.jpg +10847.jpg +16216.jpg +22142.jpg +24384.jpg +26164.jpg +22856.jpg +5679.jpg +18605.jpg +4219.jpg +12398.jpg +13897.jpg +7450.jpg +5532.jpg +19161.jpg +9769.jpg +24700.jpg +21189.jpg +5246.jpg +14242.jpg +4333.jpg +8442.jpg +3877.jpg +5348.jpg +11820.jpg +23529.jpg +7087.jpg +10542.jpg +13921.jpg +8089.jpg +3086.jpg +15355.jpg +7047.jpg +16284.jpg +17638.jpg +4727.jpg +77.jpg +19419.jpg +27346.jpg +23417.jpg +19936.jpg +7111.jpg +5.jpg +21222.jpg +23065.jpg +1482.jpg +3296.jpg +4945.jpg +28734.jpg +25672.jpg +7134.jpg +275.jpg +27232.jpg +11637.jpg +29706.jpg +3167.jpg +18233.jpg +11780.jpg +17834.jpg +5954.jpg +27869.jpg +2989.jpg +21593.jpg +28322.jpg +18978.jpg +3697.jpg +28931.jpg +5379.jpg +21834.jpg +29686.jpg +22143.jpg +2941.jpg +20233.jpg +2987.jpg +766.jpg +14382.jpg +7095.jpg +9981.jpg +11016.jpg +15635.jpg +8418.jpg +27449.jpg +8106.jpg +10169.jpg +11712.jpg +14029.jpg +20635.jpg +1435.jpg +18321.jpg +5908.jpg +28779.jpg +759.jpg +9429.jpg +28992.jpg +18955.jpg +21156.jpg +13630.jpg +11548.jpg +10136.jpg +14775.jpg +1406.jpg +16323.jpg +26621.jpg +15224.jpg +3947.jpg +6952.jpg +29137.jpg +442.jpg +15407.jpg +3241.jpg +23156.jpg +12934.jpg +860.jpg +24174.jpg +5176.jpg +2924.jpg +16922.jpg +5563.jpg +17647.jpg +8865.jpg +8176.jpg +27.jpg +23579.jpg +26290.jpg +18216.jpg +28403.jpg +29196.jpg +7817.jpg +5890.jpg +27444.jpg +2731.jpg +16568.jpg +25754.jpg +22331.jpg +5304.jpg +3140.jpg +5902.jpg +129.jpg +20485.jpg +7639.jpg +21202.jpg +8021.jpg +22624.jpg +29946.jpg +28458.jpg +333.jpg +3897.jpg +9903.jpg +14203.jpg +25550.jpg +28412.jpg +8789.jpg +18858.jpg +27505.jpg +18773.jpg +1446.jpg +2110.jpg +25796.jpg +6169.jpg +23585.jpg +3459.jpg +26554.jpg +22174.jpg +22326.jpg +24526.jpg +24895.jpg +13351.jpg +15032.jpg +1859.jpg +6928.jpg +29027.jpg +17388.jpg +29497.jpg +5889.jpg +15954.jpg +24872.jpg +18327.jpg +22322.jpg +15641.jpg +6439.jpg +6691.jpg +22688.jpg +4179.jpg +18356.jpg +3852.jpg +6751.jpg +1187.jpg +17583.jpg +23226.jpg +22402.jpg +24936.jpg +21839.jpg +5115.jpg +13907.jpg +5730.jpg +16493.jpg +22437.jpg +29733.jpg +15134.jpg +279.jpg +2296.jpg +15691.jpg +16007.jpg +23792.jpg +5866.jpg +5769.jpg +3264.jpg +10859.jpg +14840.jpg +8201.jpg +29321.jpg +12305.jpg +24327.jpg +3299.jpg +27937.jpg +25534.jpg +26470.jpg +11062.jpg +12158.jpg +19675.jpg +7950.jpg +1902.jpg +19809.jpg +21874.jpg +3304.jpg +28166.jpg +14471.jpg +9687.jpg +29033.jpg +25973.jpg +3552.jpg +27777.jpg +27584.jpg +12170.jpg +10957.jpg +4354.jpg +26379.jpg +8997.jpg +20711.jpg +21169.jpg +19663.jpg 
+1434.jpg +28563.jpg +5154.jpg +22409.jpg +24664.jpg +3770.jpg +12184.jpg +10460.jpg +18418.jpg +25597.jpg +7449.jpg +20335.jpg +24587.jpg +23102.jpg +3511.jpg +8946.jpg +16062.jpg +26359.jpg +24059.jpg +15079.jpg +18213.jpg +6932.jpg +24194.jpg +28728.jpg +22969.jpg +14698.jpg +25690.jpg +9656.jpg +2295.jpg +27963.jpg +16704.jpg +5276.jpg +28862.jpg +15197.jpg +814.jpg +26779.jpg +12051.jpg +8781.jpg +17606.jpg +2085.jpg +27804.jpg +12038.jpg +29233.jpg +29091.jpg +21502.jpg +29590.jpg +21296.jpg +26267.jpg +14959.jpg +25164.jpg +13134.jpg +4865.jpg +6878.jpg +14143.jpg +23872.jpg +11671.jpg +16254.jpg +19159.jpg +1854.jpg +3017.jpg +22937.jpg +14574.jpg +29642.jpg +13140.jpg +22186.jpg +23652.jpg +7240.jpg +23073.jpg +20070.jpg +2485.jpg +18125.jpg +11058.jpg +2193.jpg +1246.jpg +11681.jpg +11278.jpg +25688.jpg +27325.jpg +164.jpg +7930.jpg +18776.jpg +27264.jpg +19110.jpg +16383.jpg +8604.jpg +4541.jpg +5408.jpg +16008.jpg +18766.jpg +6001.jpg +16599.jpg +29370.jpg +22965.jpg +9578.jpg +28144.jpg +20752.jpg +3891.jpg +26486.jpg +367.jpg +5689.jpg +5694.jpg +784.jpg +18359.jpg +8101.jpg +21638.jpg +22908.jpg +29515.jpg +24238.jpg +27690.jpg +2008.jpg +27885.jpg +6827.jpg +7380.jpg +506.jpg +24603.jpg +24943.jpg +11822.jpg +22813.jpg +11334.jpg +27340.jpg +25012.jpg +18608.jpg +11633.jpg +27705.jpg +9845.jpg +21692.jpg +25246.jpg +29402.jpg +20906.jpg +9446.jpg +19310.jpg +12160.jpg +18521.jpg +18513.jpg +9593.jpg +26271.jpg +4839.jpg +16620.jpg +16489.jpg +1550.jpg +5645.jpg +3856.jpg +16331.jpg +3441.jpg +24132.jpg +28156.jpg +22002.jpg +14803.jpg +18511.jpg +1483.jpg +28598.jpg +29796.jpg +11926.jpg +10986.jpg +17691.jpg +28093.jpg +10352.jpg +20304.jpg +12539.jpg +16703.jpg +19548.jpg +1333.jpg +9372.jpg +25906.jpg +24583.jpg +24792.jpg +21568.jpg +6646.jpg +29070.jpg +17035.jpg +732.jpg +19407.jpg +17404.jpg +28920.jpg +4946.jpg +23558.jpg +12925.jpg +3668.jpg +12612.jpg +12259.jpg +17711.jpg +21894.jpg +19457.jpg +23680.jpg +21334.jpg +22347.jpg +14486.jpg +23974.jpg +10369.jpg +28831.jpg +10815.jpg +12755.jpg +9557.jpg +5120.jpg +2124.jpg +25779.jpg +439.jpg +10077.jpg +4520.jpg +21108.jpg +17287.jpg +18086.jpg +9122.jpg +23519.jpg +9391.jpg +8028.jpg +3077.jpg +12944.jpg +29105.jpg +27077.jpg +23425.jpg +26990.jpg +14199.jpg +1772.jpg +23146.jpg +4609.jpg +8599.jpg +536.jpg +5978.jpg +2817.jpg +24969.jpg +11499.jpg +10855.jpg +12313.jpg +4365.jpg +18254.jpg +26585.jpg +16809.jpg +8861.jpg +29454.jpg +5224.jpg +7909.jpg +15921.jpg +1986.jpg +19112.jpg +25915.jpg +19739.jpg +23795.jpg +28642.jpg +8881.jpg +22476.jpg +7754.jpg +300.jpg +2493.jpg +8336.jpg +4356.jpg +12301.jpg +11660.jpg +3427.jpg +24722.jpg +16218.jpg +5047.jpg +2894.jpg +15292.jpg +1832.jpg +27946.jpg +1844.jpg +21792.jpg +8025.jpg +2217.jpg +21101.jpg +14774.jpg +12025.jpg +8061.jpg +7492.jpg +1127.jpg +9540.jpg +6854.jpg +11900.jpg +16441.jpg +5111.jpg +27835.jpg +24480.jpg +16853.jpg +7362.jpg +17517.jpg +2497.jpg +14055.jpg +22353.jpg +29478.jpg +15793.jpg +4336.jpg +14296.jpg +7857.jpg +13198.jpg +22672.jpg +7674.jpg +15861.jpg +18483.jpg +21137.jpg +6620.jpg +7783.jpg +28658.jpg +24623.jpg +24129.jpg +17182.jpg +4169.jpg +7388.jpg +26268.jpg +1372.jpg +27429.jpg +19137.jpg +12241.jpg +23017.jpg +16150.jpg +25946.jpg +10353.jpg +6634.jpg +13184.jpg +2536.jpg +26111.jpg +1629.jpg +22873.jpg +20244.jpg +8287.jpg +25240.jpg +18375.jpg +892.jpg +10998.jpg +21029.jpg +9701.jpg +531.jpg +17939.jpg +28953.jpg +6223.jpg +5820.jpg +28911.jpg +1295.jpg +7092.jpg +15161.jpg +22513.jpg +15720.jpg +19642.jpg +10823.jpg 
+27161.jpg +27895.jpg +21616.jpg +26021.jpg +4456.jpg +23886.jpg +18328.jpg +22604.jpg +6898.jpg +28193.jpg +13555.jpg +22754.jpg +12942.jpg +4029.jpg +21658.jpg +24270.jpg +7136.jpg +4974.jpg +17167.jpg +1894.jpg +15864.jpg +23092.jpg +4153.jpg +7755.jpg +2663.jpg +21474.jpg +19243.jpg +19546.jpg +28848.jpg +2648.jpg +29696.jpg +11833.jpg +28517.jpg +8122.jpg +1525.jpg +7204.jpg +13739.jpg +26786.jpg +29615.jpg +19309.jpg +28137.jpg +23722.jpg +8009.jpg +24284.jpg +25869.jpg +22164.jpg +17919.jpg +15340.jpg +28501.jpg +14740.jpg +25045.jpg +14526.jpg +12437.jpg +18331.jpg +11210.jpg +2007.jpg +29190.jpg +864.jpg +9420.jpg +15362.jpg +22771.jpg +1659.jpg +3190.jpg +8824.jpg +8325.jpg +19953.jpg +25520.jpg +27591.jpg +18775.jpg +16488.jpg +13281.jpg +9257.jpg +13188.jpg +27859.jpg +61.jpg +27653.jpg +29533.jpg +13950.jpg +8528.jpg +8045.jpg +5473.jpg +29872.jpg +25943.jpg +22172.jpg +28343.jpg +9225.jpg +10687.jpg +27947.jpg +19712.jpg +10929.jpg +16110.jpg +24100.jpg +11089.jpg +15931.jpg +7840.jpg +24882.jpg +16270.jpg +28293.jpg +23116.jpg +12669.jpg +25810.jpg +1514.jpg +23678.jpg +24937.jpg +1149.jpg +10007.jpg +20571.jpg +24200.jpg +10118.jpg +13127.jpg +14658.jpg +18624.jpg +25086.jpg +24734.jpg +26403.jpg +19955.jpg +12349.jpg +13391.jpg +6005.jpg +9727.jpg +10583.jpg +10446.jpg +12729.jpg +5271.jpg +25863.jpg +25982.jpg +13083.jpg +4902.jpg +15823.jpg +20877.jpg +19880.jpg +14258.jpg +10592.jpg +26835.jpg +7365.jpg +7606.jpg +494.jpg +29554.jpg +6.jpg +8494.jpg +4057.jpg +13116.jpg +4596.jpg +17262.jpg +19708.jpg +19210.jpg +29841.jpg +11993.jpg +9006.jpg +10087.jpg +2820.jpg +25106.jpg +25354.jpg +26623.jpg +19937.jpg +22549.jpg +26700.jpg +26305.jpg +2272.jpg +7530.jpg +16307.jpg +28689.jpg +26204.jpg +19278.jpg +16532.jpg +15400.jpg +11581.jpg +28104.jpg +6338.jpg +9156.jpg +9877.jpg +7013.jpg +13261.jpg +5928.jpg +12764.jpg +544.jpg +20567.jpg +24198.jpg +16212.jpg +6608.jpg +25619.jpg +17732.jpg +4257.jpg +25885.jpg +25000.jpg +920.jpg +22399.jpg +14468.jpg +4207.jpg +28758.jpg +15985.jpg +14207.jpg +10004.jpg +4308.jpg +22471.jpg +7093.jpg +13929.jpg +13806.jpg +24293.jpg +27111.jpg +1025.jpg +11527.jpg +2506.jpg +29530.jpg +8692.jpg +26642.jpg +22415.jpg +29466.jpg +13249.jpg +19123.jpg +790.jpg +7890.jpg +21162.jpg +27285.jpg +7537.jpg +19286.jpg +29461.jpg +13893.jpg +9736.jpg +3676.jpg +21040.jpg +6847.jpg +29031.jpg +21749.jpg +23186.jpg +4246.jpg +4386.jpg +3084.jpg +14147.jpg +4547.jpg +19376.jpg +4362.jpg +22815.jpg +27789.jpg +24547.jpg +17364.jpg +8427.jpg +4239.jpg +1822.jpg +1643.jpg +7044.jpg +649.jpg +17559.jpg +21473.jpg +28907.jpg +10691.jpg +2287.jpg +21838.jpg +28024.jpg +2997.jpg +152.jpg +20585.jpg +25489.jpg +18583.jpg +26490.jpg +20276.jpg +28781.jpg +4841.jpg +27396.jpg +9880.jpg +22899.jpg +12358.jpg +5533.jpg +14370.jpg +26801.jpg +19593.jpg +4553.jpg +6176.jpg +28661.jpg +27130.jpg +15537.jpg +16576.jpg +28485.jpg +16680.jpg +14101.jpg +23925.jpg +3930.jpg +18741.jpg +6366.jpg +20597.jpg +1234.jpg +18191.jpg +19566.jpg +23622.jpg +28909.jpg +13601.jpg +16804.jpg +778.jpg +17565.jpg +22749.jpg +23530.jpg +29888.jpg +17279.jpg +3536.jpg +15737.jpg +8409.jpg +14256.jpg +5713.jpg +27882.jpg +22477.jpg +14048.jpg +12948.jpg +16971.jpg +21425.jpg +25206.jpg +23483.jpg +11118.jpg +19691.jpg +576.jpg +24793.jpg +5215.jpg +25416.jpg +17183.jpg +16047.jpg +1203.jpg +8856.jpg +14088.jpg +5229.jpg +27464.jpg +9036.jpg +5558.jpg +12842.jpg +8066.jpg +13097.jpg +3800.jpg +24707.jpg +7937.jpg +1324.jpg +24498.jpg +7284.jpg +13653.jpg +1683.jpg +10242.jpg +28785.jpg 
+23171.jpg +24856.jpg +20218.jpg +6927.jpg +5943.jpg +22303.jpg +9542.jpg +9867.jpg +14113.jpg +17246.jpg +22466.jpg +13237.jpg +18031.jpg +28235.jpg +24135.jpg +28674.jpg +29947.jpg +6345.jpg +5996.jpg +16865.jpg +7173.jpg +8531.jpg +8071.jpg +10268.jpg +6470.jpg +23523.jpg +8339.jpg +5037.jpg +20670.jpg +7706.jpg +8313.jpg +14599.jpg +20886.jpg +3397.jpg +11752.jpg +8056.jpg +19942.jpg +6692.jpg +11875.jpg +4205.jpg +6109.jpg +18745.jpg +16433.jpg +28453.jpg +2964.jpg +19347.jpg +9825.jpg +5012.jpg +4496.jpg +16748.jpg +6452.jpg +5451.jpg +5803.jpg +17232.jpg +13153.jpg +6805.jpg +13684.jpg +15938.jpg +128.jpg +4303.jpg +28106.jpg +2410.jpg +24020.jpg +14584.jpg +18923.jpg +27398.jpg +11924.jpg +24192.jpg +10309.jpg +6096.jpg +10616.jpg +10191.jpg +16639.jpg +10019.jpg +1396.jpg +26491.jpg +20078.jpg +24440.jpg +6217.jpg +3171.jpg +10539.jpg +25228.jpg +3392.jpg +19228.jpg +20357.jpg +6348.jpg +15591.jpg +12508.jpg +27785.jpg +12735.jpg +21233.jpg +5594.jpg +14214.jpg +11228.jpg +24216.jpg +12386.jpg +26672.jpg +29732.jpg +11185.jpg +17087.jpg +23512.jpg +18917.jpg +4156.jpg +2441.jpg +14944.jpg +22381.jpg +29766.jpg +15504.jpg +29697.jpg +23505.jpg +15053.jpg +9061.jpg +15667.jpg +16537.jpg +13551.jpg +17873.jpg +22371.jpg +29103.jpg +27385.jpg +26753.jpg +3760.jpg +21727.jpg +2107.jpg +16995.jpg +15163.jpg +15626.jpg +9746.jpg +93.jpg +28595.jpg +23328.jpg +20914.jpg +20383.jpg +17947.jpg +15600.jpg +9690.jpg +22615.jpg +14394.jpg +6471.jpg +17200.jpg +13769.jpg +2718.jpg +12996.jpg +27712.jpg +10842.jpg +27984.jpg +620.jpg +28120.jpg +4748.jpg +22490.jpg +12036.jpg +2346.jpg +3863.jpg +1197.jpg +1066.jpg +17429.jpg +7188.jpg +21076.jpg +19894.jpg +22769.jpg +11418.jpg +14670.jpg +26844.jpg +20275.jpg +12377.jpg +18915.jpg +3899.jpg +20673.jpg +15274.jpg +23199.jpg +17726.jpg +25445.jpg +21713.jpg +22037.jpg +1096.jpg +16548.jpg +23890.jpg +792.jpg +10221.jpg +18564.jpg +17111.jpg +24694.jpg +17553.jpg +17677.jpg +23863.jpg +2391.jpg +24490.jpg +5103.jpg +5758.jpg +29145.jpg +4426.jpg +27165.jpg +18008.jpg +7522.jpg +19966.jpg +9627.jpg +9228.jpg +18033.jpg +3902.jpg +19834.jpg +17163.jpg +3288.jpg +4321.jpg +23779.jpg +10276.jpg +3842.jpg +18778.jpg +27782.jpg +7174.jpg +19957.jpg +11567.jpg +20666.jpg +28789.jpg +24217.jpg +1175.jpg +9723.jpg +23761.jpg +10538.jpg +9139.jpg +19679.jpg +20453.jpg +10488.jpg +19581.jpg +11881.jpg +19163.jpg +27420.jpg +23541.jpg +18585.jpg +5780.jpg +9011.jpg +3757.jpg +697.jpg +23057.jpg +27585.jpg +21260.jpg +3948.jpg +8135.jpg +18648.jpg +668.jpg +23843.jpg +13371.jpg +29673.jpg +27030.jpg +6414.jpg +23973.jpg +2224.jpg +7644.jpg +2250.jpg +25516.jpg +23877.jpg +5466.jpg +16051.jpg +1264.jpg +22926.jpg +13.jpg +6903.jpg +29785.jpg +9589.jpg +22440.jpg +12580.jpg +20977.jpg +21454.jpg +22712.jpg +19771.jpg +27024.jpg +1421.jpg +547.jpg +11698.jpg +24069.jpg +16906.jpg +3566.jpg +11020.jpg +12563.jpg +9449.jpg +29234.jpg +20662.jpg +16028.jpg +6977.jpg +22118.jpg +17038.jpg +7825.jpg +25361.jpg +21153.jpg +11543.jpg +18904.jpg +27204.jpg +25167.jpg +7600.jpg +11644.jpg +10798.jpg +29474.jpg +19185.jpg +5892.jpg +4230.jpg +22838.jpg +8445.jpg +21282.jpg +23217.jpg +25329.jpg +21333.jpg +19535.jpg +29503.jpg +24204.jpg +564.jpg +18575.jpg +4044.jpg +4542.jpg +1639.jpg +27051.jpg +17079.jpg +28287.jpg +18215.jpg +2734.jpg +14075.jpg +19794.jpg +14818.jpg +27867.jpg +27614.jpg +5062.jpg +28730.jpg +9451.jpg +1863.jpg +19740.jpg +27287.jpg +18169.jpg +16841.jpg +17658.jpg +2809.jpg +9268.jpg +1195.jpg +3265.jpg +29152.jpg +12871.jpg +6741.jpg +10664.jpg 
+24934.jpg +20705.jpg +25666.jpg +13221.jpg +17261.jpg +20623.jpg +8594.jpg +22886.jpg +7315.jpg +4643.jpg +13803.jpg +11734.jpg +4753.jpg +4549.jpg +19253.jpg +19578.jpg +21678.jpg +1585.jpg +21336.jpg +10706.jpg +7394.jpg +1738.jpg +29750.jpg +14167.jpg +3364.jpg +22184.jpg +3263.jpg +7437.jpg +22607.jpg +6084.jpg +19592.jpg +19837.jpg +10981.jpg +1400.jpg +11921.jpg +24483.jpg +1804.jpg +1107.jpg +28551.jpg +19866.jpg +575.jpg +24008.jpg +20168.jpg +29815.jpg +10635.jpg +21684.jpg +6194.jpg +15438.jpg +27757.jpg +23562.jpg +24108.jpg +19485.jpg +15311.jpg +14160.jpg +26472.jpg +15088.jpg +29654.jpg +16676.jpg +21073.jpg +15533.jpg +29195.jpg +13490.jpg +5521.jpg +2666.jpg +3970.jpg +27794.jpg +20028.jpg +4355.jpg +26361.jpg +24305.jpg +21293.jpg +2244.jpg +12728.jpg +91.jpg +28444.jpg +2785.jpg +17218.jpg +5935.jpg +14058.jpg +6995.jpg +4287.jpg +16829.jpg +11475.jpg +15223.jpg +9822.jpg +10603.jpg +17894.jpg +27504.jpg +11441.jpg +15773.jpg +28298.jpg +29140.jpg +6908.jpg +21027.jpg +21654.jpg +17613.jpg +29501.jpg +6891.jpg +1472.jpg +20864.jpg +28971.jpg +2701.jpg +29890.jpg +13019.jpg +16877.jpg +21644.jpg +20387.jpg +18202.jpg +12028.jpg +9625.jpg +13814.jpg +16944.jpg +16907.jpg +21811.jpg +11229.jpg +7885.jpg +24836.jpg +18567.jpg +17148.jpg +26444.jpg +15969.jpg +9949.jpg +19742.jpg +7150.jpg +12703.jpg +21447.jpg +6883.jpg +24620.jpg +14648.jpg +15219.jpg +13628.jpg +7372.jpg +19214.jpg +16313.jpg +14628.jpg +8146.jpg +11514.jpg +28252.jpg +27427.jpg +8044.jpg +23600.jpg +15072.jpg +13848.jpg +21256.jpg +56.jpg +1388.jpg +26407.jpg +15686.jpg +23913.jpg +15064.jpg +10659.jpg +26279.jpg +23303.jpg +1716.jpg +21864.jpg +7497.jpg +3439.jpg +10560.jpg +5766.jpg +4441.jpg +27880.jpg +28877.jpg +25668.jpg +13378.jpg +28890.jpg +21482.jpg +28010.jpg +11653.jpg +23388.jpg +20172.jpg +14676.jpg +513.jpg +18214.jpg +954.jpg +11094.jpg +16686.jpg +1870.jpg +18056.jpg +7892.jpg +9776.jpg +20404.jpg +229.jpg +13656.jpg +3410.jpg +6754.jpg +23155.jpg +17924.jpg +5076.jpg +14425.jpg +3982.jpg +10602.jpg +28955.jpg +21138.jpg +12270.jpg +9179.jpg +14261.jpg +7116.jpg +23613.jpg +26451.jpg +9305.jpg +5110.jpg +13865.jpg +17966.jpg +21299.jpg +20626.jpg +17482.jpg +16598.jpg +14054.jpg +17128.jpg +28446.jpg +23334.jpg +335.jpg +28234.jpg +15511.jpg +13399.jpg +14864.jpg +2526.jpg +9537.jpg +19248.jpg +13758.jpg +10069.jpg +18943.jpg +1486.jpg +28019.jpg +22072.jpg +20912.jpg +10024.jpg +15804.jpg +24875.jpg +5882.jpg +8354.jpg +21955.jpg +29098.jpg +3451.jpg +251.jpg +26879.jpg +26465.jpg +628.jpg +15100.jpg +13792.jpg +29616.jpg +27428.jpg +15846.jpg +14695.jpg +22698.jpg +18582.jpg +23321.jpg +21141.jpg +20436.jpg +20314.jpg +3281.jpg +26163.jpg +17443.jpg +1438.jpg +22552.jpg +12402.jpg +28361.jpg +29701.jpg +17934.jpg +20613.jpg +25731.jpg +7367.jpg +24303.jpg +24032.jpg +27566.jpg +19036.jpg +11043.jpg +15774.jpg +25469.jpg +28961.jpg +17377.jpg +22526.jpg +11428.jpg +12205.jpg +25963.jpg +1378.jpg +2784.jpg +1593.jpg +20891.jpg +21026.jpg +25317.jpg +11247.jpg +8529.jpg +15235.jpg +25348.jpg +23784.jpg +3347.jpg +937.jpg +16943.jpg +22170.jpg +14389.jpg +21396.jpg +7228.jpg +2949.jpg +24581.jpg +2835.jpg +14885.jpg +5452.jpg +29611.jpg +5658.jpg +3148.jpg +10147.jpg +2822.jpg +14793.jpg +29908.jpg +15065.jpg +25179.jpg +9468.jpg +6740.jpg +2670.jpg +28174.jpg +10680.jpg +18244.jpg +8615.jpg +11144.jpg +9680.jpg +17534.jpg +10797.jpg +9502.jpg +15405.jpg +27917.jpg +20079.jpg +2422.jpg +8036.jpg +21693.jpg +20225.jpg +8744.jpg +14908.jpg +5989.jpg +15570.jpg +26048.jpg +25189.jpg +28660.jpg 
+7687.jpg +28226.jpg +12584.jpg +25231.jpg +3905.jpg +3719.jpg +24316.jpg +3050.jpg +23846.jpg +1051.jpg +3853.jpg +7535.jpg +21529.jpg +18180.jpg +4806.jpg +15900.jpg +21561.jpg +23515.jpg +6819.jpg +10272.jpg +23276.jpg +14747.jpg +14809.jpg +7941.jpg +8443.jpg +27031.jpg +19256.jpg +15736.jpg +15547.jpg +21124.jpg +11560.jpg +13795.jpg +16980.jpg +29655.jpg +17597.jpg +6747.jpg +421.jpg +24346.jpg +13947.jpg +5589.jpg +23994.jpg +27941.jpg +20289.jpg +17766.jpg +19668.jpg +8232.jpg +25895.jpg +29972.jpg +7034.jpg +20926.jpg +5827.jpg +6821.jpg +18406.jpg +24288.jpg +14688.jpg +29112.jpg +5815.jpg +24675.jpg +11596.jpg +22412.jpg +9214.jpg +25757.jpg +5567.jpg +5875.jpg +29580.jpg +20589.jpg +496.jpg +25448.jpg +12022.jpg +24676.jpg +19127.jpg +11008.jpg +19611.jpg +5648.jpg +28127.jpg +22266.jpg +23963.jpg +18898.jpg +18650.jpg +10619.jpg +28804.jpg +23901.jpg +26571.jpg +25529.jpg +18851.jpg +16322.jpg +25016.jpg +11500.jpg +10256.jpg +6281.jpg +2870.jpg +17632.jpg +2256.jpg +14926.jpg +1285.jpg +23185.jpg +15502.jpg +13350.jpg +2202.jpg +8841.jpg +4660.jpg +11102.jpg +24016.jpg +9545.jpg +27199.jpg +27907.jpg +13109.jpg +13055.jpg +5303.jpg +7959.jpg +28033.jpg +7969.jpg +14744.jpg +4985.jpg +23815.jpg +25514.jpg +11828.jpg +16053.jpg +7001.jpg +20633.jpg +12087.jpg +5886.jpg +19498.jpg +10264.jpg +12531.jpg +17007.jpg +5581.jpg +4148.jpg +15829.jpg +9764.jpg +9757.jpg +25085.jpg +20195.jpg +16130.jpg +9476.jpg +22806.jpg +19956.jpg +11207.jpg +13037.jpg +22744.jpg +9598.jpg +21734.jpg +14288.jpg +22102.jpg +29685.jpg +21510.jpg +10553.jpg +16637.jpg +20422.jpg +14943.jpg +25806.jpg +13161.jpg +1257.jpg +12991.jpg +14287.jpg +23003.jpg +14675.jpg +19249.jpg +12880.jpg +29546.jpg +24067.jpg +6737.jpg +22276.jpg +24610.jpg +3812.jpg +6989.jpg +27139.jpg +19212.jpg +15521.jpg +15717.jpg +10422.jpg +6172.jpg +17436.jpg +15496.jpg +15661.jpg +26908.jpg +10474.jpg +4377.jpg +4975.jpg +23835.jpg +12581.jpg +13639.jpg +24745.jpg +8702.jpg +15763.jpg +20873.jpg +5606.jpg +9403.jpg +11244.jpg +7094.jpg +17177.jpg +6485.jpg +26027.jpg +1047.jpg +7557.jpg +13832.jpg +11317.jpg +19288.jpg +9809.jpg +28351.jpg +7433.jpg +24400.jpg +22065.jpg +24750.jpg +11884.jpg +5522.jpg +10335.jpg +142.jpg +17108.jpg +19796.jpg +24426.jpg +19659.jpg +3631.jpg +29178.jpg +2320.jpg +11727.jpg +3529.jpg +21407.jpg +9384.jpg +20029.jpg +10419.jpg +16785.jpg +25902.jpg +11250.jpg +28910.jpg +10565.jpg +9955.jpg +10423.jpg +435.jpg +17782.jpg +10247.jpg +749.jpg +14852.jpg +6015.jpg +13212.jpg +14510.jpg +28054.jpg +5013.jpg +2242.jpg +11116.jpg +6582.jpg +21389.jpg +4822.jpg +3683.jpg +15322.jpg +2549.jpg +27844.jpg +17868.jpg +20243.jpg +17610.jpg +12373.jpg +22362.jpg +19930.jpg +29127.jpg +20664.jpg +28290.jpg +1858.jpg +24643.jpg +22398.jpg +5869.jpg +14714.jpg +16694.jpg +16985.jpg +5415.jpg +16891.jpg +16184.jpg +24473.jpg +1263.jpg +10678.jpg +12864.jpg +13683.jpg +740.jpg +3901.jpg +29090.jpg +6893.jpg +29809.jpg +21095.jpg +23791.jpg +16491.jpg +18117.jpg +10343.jpg +7721.jpg +25656.jpg +13759.jpg +4878.jpg +6343.jpg +21453.jpg +14216.jpg +27131.jpg +10029.jpg +8191.jpg +6105.jpg +3933.jpg +9246.jpg +28904.jpg +23596.jpg +23871.jpg +6534.jpg +9716.jpg +4340.jpg +24634.jpg +21686.jpg +7299.jpg +26213.jpg +10168.jpg +13813.jpg +19711.jpg +16450.jpg +1134.jpg +22848.jpg +26109.jpg +23077.jpg +12485.jpg +3129.jpg +26281.jpg +27890.jpg +18198.jpg +22784.jpg +23950.jpg +28165.jpg +15380.jpg +4245.jpg +16962.jpg +7075.jpg +14043.jpg +1305.jpg +12675.jpg +3268.jpg +20405.jpg +9037.jpg +29349.jpg +13057.jpg 
+10922.jpg +11199.jpg +18698.jpg +10017.jpg +20305.jpg +26569.jpg +27181.jpg +8876.jpg +7988.jpg +24766.jpg +13917.jpg +17054.jpg +22004.jpg +9279.jpg +21276.jpg +13311.jpg +12332.jpg +12037.jpg +11897.jpg +24444.jpg +15507.jpg +1956.jpg +894.jpg +19272.jpg +6665.jpg +27447.jpg +8983.jpg +4875.jpg +3205.jpg +10888.jpg +24523.jpg +5285.jpg +16606.jpg +15217.jpg +17445.jpg +20139.jpg +15348.jpg +22203.jpg +12089.jpg +7528.jpg +3638.jpg +3078.jpg +22097.jpg +23617.jpg +11635.jpg +15919.jpg +14086.jpg +24285.jpg +3183.jpg +12381.jpg +11252.jpg +7215.jpg +20828.jpg +21185.jpg +20061.jpg +29641.jpg +23544.jpg +20621.jpg +26508.jpg +12873.jpg +21469.jpg +462.jpg +2700.jpg +8417.jpg +14765.jpg +9494.jpg +4855.jpg +13128.jpg +23209.jpg +18535.jpg +27517.jpg +7998.jpg +13610.jpg +26664.jpg +10697.jpg +20901.jpg +7454.jpg +22396.jpg +18516.jpg +9392.jpg +15069.jpg +17142.jpg +15346.jpg +2863.jpg +3875.jpg +10689.jpg +11739.jpg +15123.jpg +11751.jpg +22829.jpg +23113.jpg +27731.jpg +4241.jpg +15047.jpg +15795.jpg +22655.jpg +12330.jpg +9455.jpg +20272.jpg +10283.jpg +13125.jpg +24616.jpg +12403.jpg +19440.jpg +17475.jpg +18970.jpg +29581.jpg +900.jpg +14687.jpg +4431.jpg +13859.jpg +15086.jpg +24140.jpg +26369.jpg +16394.jpg +2838.jpg +4264.jpg +10072.jpg +3999.jpg +28183.jpg +8527.jpg +8817.jpg +23688.jpg +6017.jpg +11756.jpg +3603.jpg +3696.jpg +29341.jpg +23091.jpg +368.jpg +16238.jpg +2527.jpg +6632.jpg +12167.jpg +927.jpg +1070.jpg +8041.jpg +25325.jpg +3570.jpg +21248.jpg +10134.jpg +8000.jpg +16453.jpg +678.jpg +28408.jpg +15029.jpg +18677.jpg +28211.jpg +10455.jpg +14507.jpg +16132.jpg +29492.jpg +28213.jpg +23966.jpg +28188.jpg +26186.jpg +18367.jpg +478.jpg +1884.jpg +1591.jpg +9127.jpg +23868.jpg +8784.jpg +4277.jpg +4578.jpg +6390.jpg +22537.jpg +14994.jpg +13005.jpg +17315.jpg +11783.jpg +21668.jpg +22794.jpg +17072.jpg +17719.jpg +29675.jpg +2003.jpg +24644.jpg +1950.jpg +5414.jpg +413.jpg +1019.jpg +3351.jpg +9801.jpg +10774.jpg +20997.jpg +5392.jpg +11845.jpg +14235.jpg +10775.jpg +22094.jpg +22643.jpg +7157.jpg +20111.jpg +7030.jpg +573.jpg +28830.jpg +3063.jpg +18013.jpg +24678.jpg +22980.jpg +9068.jpg +20990.jpg +1994.jpg +2576.jpg +9925.jpg +24782.jpg +27370.jpg +22888.jpg +24113.jpg +25927.jpg +14458.jpg +21998.jpg +4800.jpg +24155.jpg +5708.jpg +11069.jpg +4521.jpg +20181.jpg +11469.jpg +14769.jpg +20341.jpg +12724.jpg +25429.jpg +29015.jpg +3615.jpg +21554.jpg +27681.jpg +7370.jpg +6994.jpg +14051.jpg +22028.jpg +2682.jpg +19683.jpg +7954.jpg +15240.jpg +947.jpg +15744.jpg +15304.jpg +1574.jpg +15013.jpg +25043.jpg +21968.jpg +17344.jpg +13348.jpg +9168.jpg +18782.jpg +27293.jpg +26386.jpg +10030.jpg +18325.jpg +23951.jpg +16460.jpg +13179.jpg +29532.jpg +4019.jpg +17036.jpg +12933.jpg +24606.jpg +11980.jpg +6445.jpg +3444.jpg +23290.jpg +4691.jpg +17763.jpg +19833.jpg +20713.jpg +29121.jpg +8858.jpg +18162.jpg +10084.jpg +25738.jpg +25707.jpg +3498.jpg +29773.jpg +19877.jpg +19766.jpg +11985.jpg +2451.jpg +8563.jpg +15196.jpg +27656.jpg +29639.jpg +1606.jpg +16766.jpg +3335.jpg +23967.jpg +1449.jpg +7350.jpg +9576.jpg +10466.jpg +18901.jpg +16174.jpg +17775.jpg +20290.jpg +22000.jpg +18764.jpg +24121.jpg +21680.jpg +15276.jpg +11922.jpg +22089.jpg +54.jpg +13044.jpg +25952.jpg +4533.jpg +28441.jpg +8297.jpg +25019.jpg +15827.jpg +5777.jpg +10623.jpg +21083.jpg +11282.jpg +19682.jpg +11012.jpg +3704.jpg +1923.jpg +22534.jpg +21047.jpg +12317.jpg +21046.jpg +26471.jpg +5572.jpg +14467.jpg +22997.jpg +16138.jpg +4372.jpg +6681.jpg +19197.jpg +13107.jpg +26798.jpg +3109.jpg 
+28200.jpg +9977.jpg +14453.jpg +11883.jpg +29996.jpg +5038.jpg +20688.jpg +22658.jpg +22354.jpg +4949.jpg +14856.jpg +16203.jpg +27331.jpg +2276.jpg +20332.jpg +14914.jpg +16559.jpg +19491.jpg +26758.jpg +8356.jpg +24158.jpg +7750.jpg +25225.jpg +20733.jpg +13027.jpg +23021.jpg +13164.jpg +27879.jpg +5534.jpg +24507.jpg +25650.jpg +21631.jpg +22153.jpg +10494.jpg +25899.jpg +22397.jpg +26147.jpg +27864.jpg +11597.jpg +2162.jpg +27994.jpg +11814.jpg +1679.jpg +20036.jpg +13435.jpg +23317.jpg +20295.jpg +12337.jpg +7052.jpg +11265.jpg +23816.jpg +4713.jpg +8596.jpg +11741.jpg +5074.jpg +9942.jpg +1761.jpg +19168.jpg +3114.jpg +18087.jpg +3325.jpg +9683.jpg +21721.jpg +12451.jpg +14980.jpg +1607.jpg +26901.jpg +24193.jpg +16139.jpg +6353.jpg +27538.jpg +21219.jpg +14723.jpg +17772.jpg +7033.jpg +10140.jpg +20267.jpg +889.jpg +16187.jpg +28329.jpg +24126.jpg +8285.jpg +8305.jpg +24263.jpg +28790.jpg +17968.jpg +11674.jpg +27276.jpg +23482.jpg +20165.jpg +24347.jpg +13432.jpg +10857.jpg +21995.jpg +5511.jpg +3429.jpg +5091.jpg +18543.jpg +25710.jpg +20963.jpg +2185.jpg +6291.jpg +17881.jpg +8515.jpg +18011.jpg +21789.jpg +13958.jpg +19444.jpg +1297.jpg +12207.jpg +15748.jpg +111.jpg +6421.jpg +9302.jpg +26724.jpg +10653.jpg +27878.jpg +3817.jpg +22523.jpg +24049.jpg +15472.jpg +18779.jpg +15871.jpg +893.jpg +1733.jpg +20992.jpg +1365.jpg +7432.jpg +18040.jpg +25535.jpg +21494.jpg +25518.jpg +12945.jpg +4832.jpg +8617.jpg +15314.jpg +26059.jpg +23621.jpg +21821.jpg +14950.jpg +432.jpg +13862.jpg +4755.jpg +5383.jpg +28914.jpg +9013.jpg +10307.jpg +21106.jpg +7361.jpg +14990.jpg +12632.jpg +29329.jpg +16741.jpg +3334.jpg +14922.jpg +6959.jpg +26393.jpg +13965.jpg +13084.jpg +18073.jpg +6253.jpg +28438.jpg +24755.jpg +21462.jpg +4905.jpg +20927.jpg +18619.jpg +4107.jpg +17700.jpg +29156.jpg +27857.jpg +2384.jpg +4469.jpg +21810.jpg +26301.jpg +22472.jpg +20896.jpg +7022.jpg +25693.jpg +9812.jpg +25785.jpg +26653.jpg +18317.jpg +29083.jpg +18139.jpg +7005.jpg +26843.jpg +23098.jpg +19026.jpg +3298.jpg +647.jpg +1510.jpg +26045.jpg +28003.jpg +18719.jpg +7396.jpg +13021.jpg +602.jpg +12120.jpg +26467.jpg +8605.jpg +18305.jpg +599.jpg +27979.jpg +20096.jpg +14311.jpg +17648.jpg +3581.jpg +27415.jpg +23867.jpg +13825.jpg +25265.jpg +24926.jpg +28102.jpg +18628.jpg +28835.jpg +11431.jpg +21501.jpg +25890.jpg +12597.jpg +4385.jpg +22753.jpg +7926.jpg +15520.jpg +26651.jpg +10265.jpg +4693.jpg +21732.jpg +298.jpg +6453.jpg +20919.jpg +10901.jpg +23715.jpg +19515.jpg +17895.jpg +7035.jpg +5526.jpg +1188.jpg +13733.jpg +26362.jpg +23692.jpg +20910.jpg +26367.jpg +19852.jpg +6364.jpg +2680.jpg +25793.jpg +29407.jpg +16437.jpg +29388.jpg +18065.jpg +26207.jpg +15788.jpg +10629.jpg +11007.jpg +27236.jpg +15137.jpg +3809.jpg +29738.jpg +17937.jpg +14035.jpg +28248.jpg +2492.jpg +28197.jpg +20512.jpg +2883.jpg +294.jpg +23750.jpg +1010.jpg +27632.jpg +22713.jpg +25781.jpg +3958.jpg +22618.jpg +12762.jpg +18708.jpg +18675.jpg +15178.jpg +11547.jpg +9389.jpg +29221.jpg +7711.jpg +1350.jpg +22855.jpg +21953.jpg +21171.jpg +23725.jpg +20712.jpg +1627.jpg +8364.jpg +23853.jpg +29794.jpg +5002.jpg +26886.jpg +24212.jpg +23499.jpg +4008.jpg +15750.jpg +19617.jpg +1746.jpg +21805.jpg +11211.jpg +18472.jpg +19884.jpg +11566.jpg +21400.jpg +28807.jpg +18774.jpg +28616.jpg +1971.jpg +4519.jpg +7664.jpg +17253.jpg +13393.jpg +12819.jpg +358.jpg +16724.jpg +18316.jpg +25440.jpg +9095.jpg +2132.jpg +28207.jpg +28426.jpg +7010.jpg +27888.jpg +13030.jpg +8828.jpg +16590.jpg +12846.jpg +5259.jpg +9348.jpg +7405.jpg +548.jpg 
+21184.jpg +28716.jpg +26253.jpg +3613.jpg +12715.jpg +4056.jpg +22933.jpg +22254.jpg +1518.jpg +20789.jpg +27015.jpg +4813.jpg +345.jpg +14092.jpg +26610.jpg +17097.jpg +6804.jpg +3168.jpg +28416.jpg +28100.jpg +26487.jpg +5900.jpg +14148.jpg +25926.jpg +17463.jpg +11338.jpg +15289.jpg +11641.jpg +27246.jpg +7015.jpg +29764.jpg +7041.jpg +20491.jpg +3700.jpg +22529.jpg +25687.jpg +1714.jpg +15458.jpg +15556.jpg +28170.jpg +10458.jpg +23807.jpg +27964.jpg +10545.jpg +15607.jpg +20415.jpg +22573.jpg +3036.jpg +11585.jpg +2122.jpg +18164.jpg +7256.jpg +22421.jpg +18599.jpg +16412.jpg +11309.jpg +29205.jpg +202.jpg +21243.jpg +8385.jpg +12849.jpg +16176.jpg +23823.jpg +3731.jpg +21564.jpg +6864.jpg +24996.jpg +9151.jpg +27310.jpg +3067.jpg +10761.jpg +28217.jpg +22901.jpg +2237.jpg +273.jpg +27376.jpg +10556.jpg +29306.jpg +19244.jpg +13563.jpg +29270.jpg +18392.jpg +7555.jpg +23480.jpg +5069.jpg +26718.jpg +16090.jpg +16255.jpg +1640.jpg +12890.jpg +3522.jpg +8519.jpg +24709.jpg +6030.jpg +28882.jpg +8506.jpg +19229.jpg +28525.jpg +17305.jpg +18381.jpg +13599.jpg +19432.jpg +19190.jpg +16632.jpg +779.jpg +8027.jpg +26606.jpg +29053.jpg +13822.jpg +17850.jpg +26641.jpg +11235.jpg +1275.jpg +17541.jpg +10354.jpg +9197.jpg +25959.jpg +6042.jpg +20259.jpg +8590.jpg +9103.jpg +15930.jpg +15004.jpg +13280.jpg +29481.jpg +8813.jpg +26730.jpg +22337.jpg +11169.jpg +179.jpg +13750.jpg +24974.jpg +6802.jpg +29238.jpg +624.jpg +1256.jpg +11523.jpg +14381.jpg +29518.jpg +10992.jpg +9733.jpg +6739.jpg +6490.jpg +23839.jpg +17836.jpg +28244.jpg +27231.jpg +5057.jpg +3754.jpg +13625.jpg +3260.jpg +8632.jpg +10182.jpg +19898.jpg +21158.jpg +3488.jpg +11465.jpg +14168.jpg +11190.jpg +4651.jpg +6380.jpg +23989.jpg +16656.jpg +16349.jpg +11471.jpg +27609.jpg +14355.jpg +19855.jpg +5242.jpg +15320.jpg +28364.jpg +28756.jpg +10639.jpg +17589.jpg +7515.jpg +23802.jpg +3044.jpg +28859.jpg +12607.jpg +20859.jpg +18105.jpg +21704.jpg +22583.jpg +26978.jpg +18239.jpg +2298.jpg +14245.jpg +902.jpg +28423.jpg +5783.jpg +22332.jpg +19016.jpg +21611.jpg +21557.jpg +5843.jpg +19599.jpg +15201.jpg +14280.jpg +5199.jpg +24042.jpg +29765.jpg +4409.jpg +4394.jpg +22294.jpg +1741.jpg +2442.jpg +28089.jpg +5759.jpg +12594.jpg +5454.jpg +10333.jpg +10701.jpg +9162.jpg +6427.jpg +28941.jpg +28685.jpg +13472.jpg +9038.jpg +19207.jpg +4314.jpg +12939.jpg +20709.jpg +909.jpg +2395.jpg +22536.jpg +25988.jpg +15184.jpg +16531.jpg +17790.jpg +5385.jpg +15662.jpg +15359.jpg +15958.jpg +4604.jpg +22999.jpg +493.jpg +26504.jpg +21997.jpg +19251.jpg +27465.jpg +11524.jpg +12476.jpg +8681.jpg +1648.jpg +15271.jpg +15523.jpg +6670.jpg +2324.jpg +11261.jpg +12525.jpg +22579.jpg +20242.jpg +27518.jpg +27636.jpg +7271.jpg +24739.jpg +24471.jpg +29260.jpg +6929.jpg +29399.jpg +4920.jpg +3924.jpg +19420.jpg +26648.jpg +27397.jpg +15860.jpg +27028.jpg +2640.jpg +1288.jpg +23350.jpg +29713.jpg +7686.jpg +26038.jpg +20296.jpg +9693.jpg +24726.jpg +23307.jpg +3577.jpg +8487.jpg +3637.jpg +21905.jpg +15042.jpg +27262.jpg +15529.jpg +23942.jpg +14130.jpg +25744.jpg +831.jpg +1395.jpg +26062.jpg +16428.jpg +28182.jpg +24549.jpg +5723.jpg +3625.jpg +11083.jpg +8334.jpg +24602.jpg +12975.jpg +25655.jpg +29291.jpg +28821.jpg +28515.jpg +11754.jpg +1528.jpg +21798.jpg +19233.jpg +4525.jpg +5025.jpg +12172.jpg +27928.jpg +2748.jpg +10381.jpg +20439.jpg +5180.jpg +7419.jpg +27669.jpg +2038.jpg +27704.jpg +8921.jpg +29506.jpg +15959.jpg +6791.jpg +16550.jpg +6193.jpg +23781.jpg +4916.jpg +21576.jpg +21527.jpg +29009.jpg +29557.jpg +22061.jpg +11386.jpg 
+6136.jpg +19481.jpg +22225.jpg +22659.jpg +9172.jpg +7053.jpg +12102.jpg +7785.jpg +1181.jpg +381.jpg +1097.jpg +6046.jpg +23814.jpg +7264.jpg +13660.jpg +26180.jpg +22789.jpg +17037.jpg +16544.jpg +21584.jpg +1799.jpg +24962.jpg +2862.jpg +944.jpg +15629.jpg +21634.jpg +10975.jpg +28097.jpg +1225.jpg +28483.jpg +24209.jpg +16104.jpg +21698.jpg +14596.jpg +9844.jpg +3554.jpg +2909.jpg +8616.jpg +20844.jpg +984.jpg +20266.jpg +2726.jpg +4197.jpg +13510.jpg +21736.jpg +7589.jpg +13767.jpg +19227.jpg +15676.jpg +5235.jpg +27745.jpg +29879.jpg +3425.jpg +16192.jpg +23278.jpg +27194.jpg +21326.jpg +5046.jpg +27351.jpg +25775.jpg +21972.jpg +8022.jpg +23183.jpg +14274.jpg +11377.jpg +5475.jpg +20199.jpg +29759.jpg +6814.jpg +6218.jpg +13999.jpg +18464.jpg +4243.jpg +28925.jpg +16885.jpg +24876.jpg +12515.jpg +11515.jpg +24435.jpg +14371.jpg +26982.jpg +199.jpg +12216.jpg +29807.jpg +23495.jpg +6331.jpg +10600.jpg +4763.jpg +7063.jpg +7564.jpg +28588.jpg +528.jpg +25832.jpg +21747.jpg +21254.jpg +28817.jpg +1429.jpg +12893.jpg +9792.jpg +11178.jpg +5505.jpg +28939.jpg +3416.jpg +665.jpg +12042.jpg +12825.jpg +5891.jpg +29244.jpg +9219.jpg +8254.jpg +28257.jpg +28481.jpg +1329.jpg +2239.jpg +12300.jpg +14892.jpg +22408.jpg +26251.jpg +9897.jpg +10675.jpg +16626.jpg +901.jpg +29708.jpg +8886.jpg +9866.jpg +18594.jpg +1011.jpg +7191.jpg +28810.jpg +21917.jpg +18491.jpg +20256.jpg +13426.jpg +22920.jpg +14239.jpg +21697.jpg +1062.jpg +22467.jpg +3983.jpg +1260.jpg +21038.jpg +20917.jpg +25536.jpg +22176.jpg +22075.jpg +11910.jpg +14776.jpg +23476.jpg +23899.jpg +703.jpg +28131.jpg +14369.jpg +2519.jpg +23191.jpg +8787.jpg +8479.jpg +10439.jpg +29741.jpg +22111.jpg +22601.jpg +11111.jpg +23937.jpg +5518.jpg +23070.jpg +23150.jpg +1861.jpg +16574.jpg +11468.jpg +8927.jpg +20781.jpg +23490.jpg +25742.jpg +25049.jpg +9553.jpg +935.jpg +28187.jpg +23182.jpg +222.jpg +2368.jpg +13539.jpg +17143.jpg +10737.jpg +29871.jpg +15012.jpg +27630.jpg +19532.jpg +4345.jpg +6266.jpg +5093.jpg +20687.jpg +29184.jpg +8090.jpg +4751.jpg +16442.jpg +16119.jpg +24869.jpg +19191.jpg +19250.jpg +13165.jpg +18462.jpg +2775.jpg +28655.jpg +1424.jpg +9526.jpg +3525.jpg +9566.jpg +27177.jpg +17464.jpg +14290.jpg +26996.jpg +823.jpg +11753.jpg +8423.jpg +23072.jpg +23039.jpg +27468.jpg +10920.jpg +14509.jpg +25211.jpg +24218.jpg +363.jpg +2571.jpg +27215.jpg +29175.jpg +8777.jpg +21688.jpg +19274.jpg +20802.jpg +15822.jpg +16411.jpg +15256.jpg +18390.jpg +28696.jpg +11700.jpg +13898.jpg +1233.jpg +13626.jpg +20013.jpg +24789.jpg +2052.jpg +19406.jpg +23638.jpg +13304.jpg +28317.jpg +4749.jpg +26296.jpg +16625.jpg +9724.jpg +12131.jpg +16369.jpg +6848.jpg +20031.jpg +3470.jpg +4575.jpg +10745.jpg +11776.jpg +20699.jpg +20794.jpg +698.jpg +18470.jpg +191.jpg +17592.jpg +9496.jpg +1857.jpg +3252.jpg +19811.jpg +16892.jpg +29278.jpg +22107.jpg +21332.jpg +14335.jpg +21574.jpg +1146.jpg +23896.jpg +11356.jpg +19005.jpg +9374.jpg +3670.jpg +7195.jpg +14297.jpg +23927.jpg +20580.jpg +18380.jpg +17121.jpg +13535.jpg +9519.jpg +19846.jpg +12336.jpg +6016.jpg +16706.jpg +4938.jpg +11867.jpg +7107.jpg +20858.jpg +7153.jpg +13135.jpg +10232.jpg +23366.jpg +1760.jpg +4319.jpg +29803.jpg +29280.jpg +4090.jpg +19296.jpg +2343.jpg +18550.jpg +20609.jpg +19743.jpg +6237.jpg +23742.jpg +17951.jpg +26525.jpg +10486.jpg +26315.jpg +28222.jpg +27374.jpg +29624.jpg +23011.jpg +22942.jpg +18319.jpg +4981.jpg +28597.jpg +11342.jpg +1147.jpg +15299.jpg +14513.jpg +18612.jpg +5560.jpg +15453.jpg +25844.jpg +20505.jpg +853.jpg +2524.jpg +11321.jpg 
+26425.jpg +17165.jpg +3479.jpg +7811.jpg +4422.jpg +15891.jpg +21785.jpg +25538.jpg +1116.jpg +28845.jpg +9974.jpg +6807.jpg +15356.jpg +18471.jpg +16162.jpg +25296.jpg +28332.jpg +23389.jpg +1493.jpg +15070.jpg +15239.jpg +3423.jpg +26352.jpg +24922.jpg +13451.jpg +22679.jpg +1382.jpg +14550.jpg +29446.jpg +15037.jpg +364.jpg +18046.jpg +13616.jpg +25021.jpg +21383.jpg +9556.jpg +5092.jpg +27694.jpg +16290.jpg +26611.jpg +3120.jpg +12141.jpg +27954.jpg +14038.jpg +24376.jpg +17066.jpg +29102.jpg +6985.jpg +28579.jpg +28805.jpg +15539.jpg +8220.jpg +19489.jpg +27366.jpg +26424.jpg +29800.jpg +2635.jpg +16857.jpg +10105.jpg +5400.jpg +2590.jpg +14183.jpg +27914.jpg +18002.jpg +4015.jpg +23094.jpg +29650.jpg +7231.jpg +517.jpg +16589.jpg +9907.jpg +6419.jpg +3302.jpg +18814.jpg +25874.jpg +8774.jpg +24460.jpg +23187.jpg +14760.jpg +17952.jpg +27190.jpg +1937.jpg +18161.jpg +25059.jpg +7690.jpg +27188.jpg +27873.jpg +3230.jpg +24590.jpg +15812.jpg +17226.jpg +1418.jpg +22165.jpg +23238.jpg +17203.jpg +603.jpg +15166.jpg +15948.jpg +10299.jpg +14413.jpg +4721.jpg +4550.jpg +4337.jpg +25939.jpg +22723.jpg +11791.jpg +12471.jpg +37.jpg +20223.jpg +22049.jpg +17168.jpg +5416.jpg +12986.jpg +27436.jpg +5677.jpg +7771.jpg +26085.jpg +17161.jpg +20018.jpg +4265.jpg +192.jpg +9089.jpg +9386.jpg +22404.jpg +23329.jpg +25047.jpg +28546.jpg +8103.jpg +4470.jpg +10759.jpg +25884.jpg +22465.jpg +7494.jpg +9895.jpg +24772.jpg +10954.jpg +13543.jpg +6583.jpg +1457.jpg +12951.jpg +11037.jpg +20469.jpg +13900.jpg +9640.jpg +2803.jpg +16075.jpg +20309.jpg +11366.jpg +26119.jpg +24064.jpg +1375.jpg +13990.jpg +7895.jpg +28690.jpg +2335.jpg +14597.jpg +25951.jpg +13392.jpg +20201.jpg +18746.jpg +28389.jpg +24242.jpg +8234.jpg +9008.jpg +29338.jpg +1880.jpg +4221.jpg +29110.jpg +9287.jpg +24888.jpg +126.jpg +21186.jpg +27448.jpg +2057.jpg +20508.jpg +23665.jpg +3894.jpg +15168.jpg +6111.jpg +20202.jpg +3426.jpg +23120.jpg +24852.jpg +17033.jpg +20103.jpg +23428.jpg +14299.jpg +18980.jpg +28894.jpg +762.jpg +29225.jpg +4752.jpg +3125.jpg +16332.jpg +3394.jpg +24988.jpg +7101.jpg +14560.jpg +14773.jpg +19795.jpg +27729.jpg +26953.jpg +19263.jpg +452.jpg +3012.jpg +1823.jpg +4154.jpg +23831.jpg +27595.jpg +18710.jpg +17715.jpg +16668.jpg +9311.jpg +22257.jpg +26221.jpg +12483.jpg +21742.jpg +29627.jpg +24344.jpg +9885.jpg +26519.jpg +22209.jpg +7627.jpg +6221.jpg +8736.jpg +11799.jpg +25313.jpg +2478.jpg +6127.jpg +24250.jpg +16905.jpg +6794.jpg +3104.jpg +8935.jpg +3073.jpg +13606.jpg +20869.jpg +19167.jpg +973.jpg +23918.jpg +13324.jpg +4904.jpg +26994.jpg +22096.jpg +2169.jpg +825.jpg +6365.jpg +19445.jpg +5709.jpg +16395.jpg +25584.jpg +4825.jpg +14716.jpg +7046.jpg +1423.jpg +20792.jpg +17312.jpg +20478.jpg +10068.jpg +8271.jpg +168.jpg +5294.jpg +27272.jpg +633.jpg +4138.jpg +15695.jpg +23344.jpg +5784.jpg +1314.jpg +1232.jpg +16817.jpg +29229.jpg +28884.jpg +19034.jpg +9856.jpg +24760.jpg +17124.jpg +9357.jpg +18457.jpg +27893.jpg +14187.jpg +2092.jpg +19140.jpg +3562.jpg +27136.jpg +15826.jpg +6138.jpg +14103.jpg +4612.jpg +9732.jpg +6851.jpg +10464.jpg +21931.jpg +16915.jpg +19557.jpg +22858.jpg +15516.jpg +4447.jpg +2905.jpg +7263.jpg +16359.jpg +14483.jpg +8411.jpg +13118.jpg +26717.jpg +2399.jpg +11896.jpg +16415.jpg +17064.jpg +17685.jpg +19598.jpg +3057.jpg +18693.jpg +2632.jpg +21375.jpg +21252.jpg +3311.jpg +26264.jpg +21354.jpg +1244.jpg +29802.jpg +11963.jpg +24928.jpg +3098.jpg +5743.jpg +7170.jpg +4101.jpg +8342.jpg +21830.jpg +29051.jpg +1103.jpg +1377.jpg +27020.jpg +24719.jpg +17275.jpg 
+19710.jpg +20188.jpg +6675.jpg +8573.jpg +9405.jpg +4545.jpg +7907.jpg +11346.jpg +12129.jpg +9682.jpg +981.jpg +2133.jpg +6558.jpg +23977.jpg +19627.jpg +23143.jpg +24028.jpg +4444.jpg +7729.jpg +8524.jpg +6116.jpg +8800.jpg +14190.jpg +23724.jpg +5895.jpg +17264.jpg +4894.jpg +11508.jpg +16812.jpg +6507.jpg +20280.jpg +17997.jpg +13447.jpg +150.jpg +3782.jpg +453.jpg +8029.jpg +23045.jpg +27606.jpg +18372.jpg +8575.jpg +18222.jpg +2301.jpg +28400.jpg +20411.jpg +6274.jpg +9328.jpg +635.jpg +26627.jpg +12896.jpg +16213.jpg +29524.jpg +26893.jpg +1739.jpg +6432.jpg +7124.jpg +14817.jpg +22403.jpg +3273.jpg +8497.jpg +25851.jpg +27483.jpg +12280.jpg +18534.jpg +8926.jpg +9463.jpg +842.jpg +14935.jpg +29448.jpg +22263.jpg +15528.jpg +28254.jpg +23442.jpg +27022.jpg +28641.jpg +16702.jpg +23905.jpg +27442.jpg +29189.jpg +15914.jpg +21677.jpg +9361.jpg +28196.jpg +26518.jpg +12441.jpg +3839.jpg +7997.jpg +13433.jpg +1562.jpg +8322.jpg +10224.jpg +10414.jpg +17916.jpg +5601.jpg +24697.jpg +7411.jpg +1258.jpg +4309.jpg +24748.jpg +25405.jpg +15171.jpg +25194.jpg +27092.jpg +12760.jpg +13483.jpg +10715.jpg +19614.jpg +3132.jpg +10854.jpg +27992.jpg +3556.jpg +11091.jpg +4585.jpg +5829.jpg +6457.jpg +10951.jpg +25667.jpg +12034.jpg +19077.jpg +8136.jpg +1473.jpg +16288.jpg +4009.jpg +6950.jpg +228.jpg +11501.jpg +27853.jpg +18004.jpg +18994.jpg +10405.jpg +24672.jpg +19490.jpg +12497.jpg +22445.jpg +22212.jpg +19433.jpg +4561.jpg +23642.jpg +15249.jpg +20903.jpg +4734.jpg +14226.jpg +19171.jpg +8461.jpg +807.jpg +22565.jpg +1600.jpg +14955.jpg +27350.jpg +19246.jpg +9459.jpg +14428.jpg +25379.jpg +11945.jpg +17265.jpg +16072.jpg +28863.jpg +19381.jpg +15275.jpg +24512.jpg +13102.jpg +11693.jpg +13525.jpg +14947.jpg +19826.jpg +16831.jpg +16903.jpg +18048.jpg +10212.jpg +22847.jpg +3516.jpg +12254.jpg +24955.jpg +5412.jpg +28559.jpg +7982.jpg +4478.jpg +7273.jpg +20674.jpg +29432.jpg +28513.jpg +26371.jpg +4440.jpg +13263.jpg +133.jpg +8105.jpg +23331.jpg +6314.jpg +24300.jpg +10763.jpg +14876.jpg +18480.jpg +2186.jpg +17193.jpg +11065.jpg +23772.jpg +11401.jpg +11373.jpg +27863.jpg +1402.jpg +9963.jpg +2936.jpg +2885.jpg +15968.jpg +14220.jpg +27387.jpg +19344.jpg +3559.jpg +19595.jpg +17948.jpg +10496.jpg +2264.jpg +13942.jpg +26977.jpg +4662.jpg +24740.jpg +16251.jpg +4235.jpg +11293.jpg +12808.jpg +12527.jpg +21041.jpg +7547.jpg +23559.jpg +12031.jpg +18433.jpg +915.jpg +25458.jpg +21539.jpg +6166.jpg +968.jpg +27698.jpg +16492.jpg +489.jpg +24871.jpg +29256.jpg +24785.jpg +16199.jpg +4185.jpg +28382.jpg +12065.jpg +2856.jpg +7429.jpg +1589.jpg +28795.jpg +16679.jpg +20397.jpg +7239.jpg +22864.jpg +190.jpg +6069.jpg +1366.jpg +6460.jpg +10873.jpg +12094.jpg +16863.jpg +16511.jpg +12225.jpg +10048.jpg +26603.jpg +29566.jpg +13464.jpg +15690.jpg +7613.jpg +25601.jpg +16677.jpg +23281.jpg +29819.jpg +24540.jpg +8043.jpg +14781.jpg +22506.jpg +20384.jpg +14847.jpg +6013.jpg +24941.jpg +3667.jpg +9441.jpg +21783.jpg +24410.jpg +10892.jpg +20716.jpg +8504.jpg +6769.jpg +22485.jpg +13283.jpg +3874.jpg +28823.jpg +16388.jpg +12875.jpg +7217.jpg +9440.jpg +1362.jpg +17378.jpg +13360.jpg +705.jpg +25302.jpg +26295.jpg +5842.jpg +9506.jpg +17636.jpg +23456.jpg +21717.jpg +29298.jpg +18821.jpg +18790.jpg +5530.jpg +8808.jpg +23936.jpg +21338.jpg +3040.jpg +26880.jpg +12560.jpg +23833.jpg +11763.jpg +25044.jpg +22714.jpg +18320.jpg +25705.jpg +17745.jpg +29369.jpg +5716.jpg +7643.jpg +12432.jpg +29911.jpg +1586.jpg +225.jpg +8855.jpg +29612.jpg +26807.jpg +8065.jpg +997.jpg +19758.jpg +24043.jpg +12379.jpg 
+497.jpg +3541.jpg +7862.jpg +8222.jpg +5653.jpg +16160.jpg +8532.jpg +19991.jpg +458.jpg +22932.jpg +14487.jpg +21533.jpg +21351.jpg +15343.jpg +18547.jpg +18490.jpg +28394.jpg +13434.jpg +17507.jpg +16054.jpg +924.jpg +10997.jpg +10329.jpg +22699.jpg +7404.jpg +25774.jpg +3240.jpg +2266.jpg +8400.jpg +29620.jpg +7269.jpg +12727.jpg +25025.jpg +12335.jpg +24573.jpg +18918.jpg +25862.jpg +28688.jpg +21247.jpg +29668.jpg +28014.jpg +23683.jpg +23263.jpg +1078.jpg +13860.jpg +23557.jpg +18874.jpg +9836.jpg +17192.jpg +2840.jpg +1467.jpg +20374.jpg +5080.jpg +11997.jpg +10910.jpg +19786.jpg +18818.jpg +27601.jpg +12490.jpg +15376.jpg +23825.jpg +23015.jpg +19913.jpg +23079.jpg +20455.jpg +18168.jpg +11555.jpg +29228.jpg +21418.jpg +3641.jpg +14420.jpg +16249.jpg +12014.jpg +9135.jpg +17709.jpg +6442.jpg +554.jpg +7172.jpg +5340.jpg +29441.jpg +18686.jpg +12350.jpg +16719.jpg +23726.jpg +16123.jpg +11219.jpg +14044.jpg +4406.jpg +365.jpg +11748.jpg +7512.jpg +16309.jpg +17317.jpg +5216.jpg +17825.jpg +28523.jpg +2025.jpg +18265.jpg +5568.jpg +11017.jpg +9196.jpg +23774.jpg +1384.jpg +12706.jpg +13762.jpg +13673.jpg +18577.jpg +12056.jpg +27454.jpg +29360.jpg +10577.jpg +10410.jpg +29793.jpg +2600.jpg +21689.jpg +23956.jpg +12417.jpg +7186.jpg +18178.jpg +14827.jpg +26284.jpg +155.jpg +24783.jpg +6064.jpg +10748.jpg +690.jpg +20949.jpg +13285.jpg +10311.jpg +8620.jpg +15963.jpg +2814.jpg +8387.jpg +5345.jpg +19688.jpg +9109.jpg +5714.jpg +23728.jpg +17467.jpg +15353.jpg +26773.jpg +1475.jpg +11958.jpg +996.jpg +22093.jpg +18999.jpg +9814.jpg +24439.jpg +10177.jpg +12426.jpg +828.jpg +9131.jpg +28337.jpg +29939.jpg +28085.jpg +19947.jpg +8345.jpg +6771.jpg +22776.jpg +4413.jpg +23474.jpg +46.jpg +2741.jpg +21263.jpg +8367.jpg +2450.jpg +6877.jpg +7962.jpg +18155.jpg +26638.jpg +19091.jpg +29300.jpg +24796.jpg +29101.jpg +14448.jpg +23587.jpg +10809.jpg +19690.jpg +21179.jpg +2160.jpg +23055.jpg +992.jpg +15905.jpg +1981.jpg +19501.jpg +9052.jpg +12623.jpg +302.jpg +22746.jpg +6247.jpg +29930.jpg +21199.jpg +7098.jpg +162.jpg +10395.jpg +26210.jpg +3949.jpg +11000.jpg +14758.jpg +20611.jpg +6256.jpg +2994.jpg +9024.jpg +13724.jpg +9641.jpg +23405.jpg +26161.jpg +5632.jpg +4446.jpg +8246.jpg +25.jpg +6760.jpg +19539.jpg +25461.jpg +22905.jpg +18018.jpg +16801.jpg +21884.jpg +20665.jpg +17728.jpg +18735.jpg +15060.jpg +19153.jpg +16360.jpg +23240.jpg +12792.jpg +4408.jpg +27048.jpg +17491.jpg +28581.jpg +25974.jpg +18421.jpg +18614.jpg +8878.jpg +10399.jpg +24237.jpg +3153.jpg +25141.jpg +27999.jpg +17529.jpg +24717.jpg +29028.jpg +27600.jpg +10955.jpg +16839.jpg +3207.jpg +4201.jpg +17430.jpg +26804.jpg +1763.jpg +27776.jpg +5332.jpg +10670.jpg +27652.jpg +10912.jpg +9767.jpg +18702.jpg +7634.jpg +26853.jpg +21218.jpg +6293.jpg +2896.jpg +1007.jpg +10651.jpg +7691.jpg +14466.jpg +2014.jpg +4305.jpg +13461.jpg +19814.jpg +7987.jpg +17115.jpg +8429.jpg +16424.jpg +26688.jpg +24964.jpg +17208.jpg +2767.jpg +26696.jpg +6074.jpg +1808.jpg +19512.jpg +14408.jpg +14310.jpg +19366.jpg +5540.jpg +18626.jpg +21374.jpg +20269.jpg +19865.jpg +17062.jpg +14923.jpg +6232.jpg +2721.jpg +26226.jpg +25029.jpg +24050.jpg +19741.jpg +8869.jpg +19553.jpg +23556.jpg +12772.jpg +14320.jpg +17839.jpg +8521.jpg +10031.jpg +13259.jpg +9241.jpg +20563.jpg +29660.jpg +7888.jpg +22202.jpg +267.jpg +13960.jpg +14850.jpg +13798.jpg +29395.jpg +7823.jpg +22434.jpg +22876.jpg +9761.jpg +25424.jpg +12913.jpg +25944.jpg +26364.jpg +19488.jpg +15579.jpg +21340.jpg +20522.jpg +26970.jpg +8150.jpg +25462.jpg +25776.jpg +10607.jpg 
+19656.jpg +29586.jpg +6518.jpg +23662.jpg +21672.jpg +13389.jpg +14349.jpg +28982.jpg +18257.jpg +8786.jpg +19113.jpg +20148.jpg +25258.jpg +2617.jpg +18816.jpg +12718.jpg +22325.jpg +22407.jpg +18364.jpg +25596.jpg +14962.jpg +23360.jpg +28264.jpg +325.jpg +2657.jpg +12238.jpg +10840.jpg +6778.jpg +2628.jpg +19748.jpg +24511.jpg +12004.jpg +6187.jpg +15371.jpg +11388.jpg +2599.jpg +27900.jpg +28180.jpg +10640.jpg +15918.jpg +25493.jpg +5491.jpg +14264.jpg +10492.jpg +10190.jpg +20895.jpg +6110.jpg +19152.jpg +24539.jpg +24045.jpg +1352.jpg +28334.jpg +11358.jpg +2189.jpg +29832.jpg +2661.jpg +11552.jpg +20850.jpg +1672.jpg +13177.jpg +26987.jpg +4670.jpg +3191.jpg +6066.jpg +16367.jpg +22120.jpg +13088.jpg +7344.jpg +945.jpg +25566.jpg +16806.jpg +16567.jpg +16612.jpg +19915.jpg +27692.jpg +24442.jpg +11823.jpg +278.jpg +16145.jpg +23461.jpg +7363.jpg +6357.jpg +15706.jpg +22796.jpg +26208.jpg +6235.jpg +24728.jpg +23749.jpg +4895.jpg +24870.jpg +20476.jpg +20627.jpg +22079.jpg +21301.jpg +12774.jpg +23212.jpg +19285.jpg +3927.jpg +8846.jpg +7471.jpg +21118.jpg +12066.jpg +19762.jpg +17953.jpg +21154.jpg +8503.jpg +26799.jpg +14824.jpg +13674.jpg +28985.jpg +21251.jpg +1340.jpg +23177.jpg +11983.jpg +27501.jpg +17295.jpg +8338.jpg +3811.jpg +9535.jpg +12421.jpg +6407.jpg +2255.jpg +18456.jpg +9130.jpg +10503.jpg +18663.jpg +28572.jpg +1444.jpg +20153.jpg +15937.jpg +24681.jpg +10015.jpg +13593.jpg +5745.jpg +14593.jpg +25182.jpg +26438.jpg +16210.jpg +25993.jpg +1610.jpg +767.jpg +21652.jpg +15247.jpg +29538.jpg +26270.jpg +14464.jpg +28990.jpg +29323.jpg +17863.jpg +22524.jpg +318.jpg +25396.jpg +15947.jpg +13963.jpg +19449.jpg +20656.jpg +9973.jpg +3013.jpg +8803.jpg +6843.jpg +19404.jpg +24560.jpg +24420.jpg +29552.jpg +25284.jpg +17756.jpg +12473.jpg +20228.jpg +17260.jpg +21856.jpg +24720.jpg +22970.jpg +18983.jpg +20606.jpg +10665.jpg +29958.jpg +19340.jpg +28073.jpg +19003.jpg +380.jpg +5370.jpg +17424.jpg +701.jpg +9859.jpg +1101.jpg +16414.jpg +17872.jpg +258.jpg +7705.jpg +7619.jpg +17645.jpg +22774.jpg +71.jpg +12852.jpg +21984.jpg +25036.jpg +28640.jpg +7089.jpg +29173.jpg +29435.jpg +9918.jpg +12914.jpg +18147.jpg +26874.jpg +12831.jpg +4587.jpg +7603.jpg +27256.jpg +12453.jpg +12053.jpg +11350.jpg +17242.jpg +17175.jpg +9863.jpg +25095.jpg +1800.jpg +4214.jpg +22741.jpg +26005.jpg +24867.jpg +3727.jpg +23522.jpg +11445.jpg +6122.jpg +9040.jpg +23175.jpg +5894.jpg +21915.jpg +15854.jpg +3604.jpg +3322.jpg +12708.jpg +18314.jpg +390.jpg +16456.jpg +102.jpg +8684.jpg +13536.jpg +23926.jpg +18927.jpg +28932.jpg +2712.jpg +2689.jpg +2565.jpg +1353.jpg +11492.jpg +9926.jpg +18400.jpg +15771.jpg +6521.jpg +4069.jpg +26498.jpg +9560.jpg +2642.jpg +29406.jpg +29096.jpg +8610.jpg +28691.jpg +2934.jpg +27953.jpg +5059.jpg +25969.jpg +15227.jpg +14121.jpg +26346.jpg +890.jpg +14378.jpg +27559.jpg +18678.jpg +19732.jpg +17114.jpg +3235.jpg +2135.jpg +3445.jpg +57.jpg +20394.jpg +4509.jpg +23928.jpg +12886.jpg +12469.jpg +3788.jpg +799.jpg +25105.jpg +3056.jpg +17207.jpg +20718.jpg +19105.jpg +6466.jpg +21596.jpg +3160.jpg +18520.jpg +25153.jpg +29319.jpg +22599.jpg +12311.jpg +13876.jpg +1460.jpg +20879.jpg +904.jpg +13962.jpg +18307.jpg +15559.jpg +18458.jpg +12517.jpg +4919.jpg +17171.jpg +16030.jpg +29396.jpg +28152.jpg +1361.jpg +23371.jpg +26076.jpg +3130.jpg +14863.jpg +20006.jpg +11932.jpg +18900.jpg +21441.jpg +16895.jpg +24145.jpg +7738.jpg +1628.jpg +22875.jpg +12023.jpg +17494.jpg +27311.jpg +1653.jpg +2069.jpg +5251.jpg +26342.jpg +15842.jpg +27943.jpg +17994.jpg +20279.jpg 
+24545.jpg +13276.jpg +709.jpg +25089.jpg +29309.jpg +161.jpg +13575.jpg +2521.jpg +13905.jpg +8537.jpg +27922.jpg +6717.jpg +6502.jpg +12182.jpg +16317.jpg +12475.jpg +4281.jpg +25093.jpg +25103.jpg +24131.jpg +16720.jpg +15387.jpg +15810.jpg +18600.jpg +16020.jpg +28041.jpg +22171.jpg +22369.jpg +18948.jpg +19136.jpg +13080.jpg +14388.jpg +10145.jpg +15619.jpg +26681.jpg +12549.jpg +7615.jpg +15719.jpg +22175.jpg +4192.jpg +7908.jpg +24613.jpg +7616.jpg +14202.jpg +16237.jpg +15421.jpg +18879.jpg +28096.jpg +24248.jpg +8406.jpg +28420.jpg +19311.jpg +6724.jpg +7074.jpg +15397.jpg +29693.jpg +28044.jpg +19147.jpg +26765.jpg +28908.jpg +26723.jpg +9754.jpg +25569.jpg +29964.jpg +18339.jpg +12284.jpg +8050.jpg +26516.jpg +10688.jpg +7556.jpg +24307.jpg +22053.jpg +20534.jpg +24704.jpg +25875.jpg +10902.jpg +27993.jpg +7478.jpg +21331.jpg +16996.jpg +9971.jpg +24282.jpg +1728.jpg +16350.jpg +27433.jpg +27717.jpg +24571.jpg +14419.jpg +15122.jpg +11075.jpg +3348.jpg +28593.jpg +23658.jpg +11223.jpg +9467.jpg +25500.jpg +9266.jpg +10871.jpg +29574.jpg +2482.jpg +3686.jpg +26545.jpg +19132.jpg +13945.jpg +9211.jpg +20830.jpg +8112.jpg +8675.jpg +14862.jpg +10924.jpg +12277.jpg +14068.jpg +24197.jpg +22982.jpg +25713.jpg +28797.jpg +24597.jpg +15568.jpg +612.jpg +6968.jpg +6852.jpg +26678.jpg +12117.jpg +4031.jpg +14610.jpg +18813.jpg +8464.jpg +18587.jpg +22611.jpg +22778.jpg +19262.jpg +24206.jpg +18051.jpg +7502.jpg +24757.jpg +12293.jpg +19839.jpg +12691.jpg +23808.jpg +21979.jpg +20060.jpg +1570.jpg +16731.jpg +5753.jpg +9657.jpg +21285.jpg +9301.jpg +7241.jpg +2834.jpg +29286.jpg +24434.jpg +22029.jpg +12827.jpg +16973.jpg +19386.jpg +15904.jpg +23411.jpg +6579.jpg +21012.jpg +636.jpg +22671.jpg +22348.jpg +23660.jpg +24085.jpg +29984.jpg +23693.jpg +10346.jpg +10404.jpg +2984.jpg +9805.jpg +19316.jpg +14262.jpg +4798.jpg +29923.jpg +25395.jpg +14925.jpg +5443.jpg +7894.jpg +23095.jpg +21231.jpg +22787.jpg +29936.jpg +26072.jpg +2488.jpg +6393.jpg +3418.jpg +25913.jpg +12467.jpg +8496.jpg +23488.jpg +107.jpg +19602.jpg +23325.jpg +12559.jpg +9161.jpg +15531.jpg +9623.jpg +8171.jpg +19718.jpg +8544.jpg +22154.jpg +6892.jpg +21636.jpg +3786.jpg +10365.jpg +21870.jpg +19875.jpg +28699.jpg +21091.jpg +26865.jpg +22041.jpg +3920.jpg +15148.jpg +23525.jpg +24009.jpg +25960.jpg +785.jpg +17858.jpg +21600.jpg +4829.jpg +11607.jpg +20113.jpg +14910.jpg +21755.jpg +7313.jpg +4804.jpg +28139.jpg +28124.jpg +14461.jpg +1753.jpg +1875.jpg +29665.jpg +24187.jpg +9465.jpg +27320.jpg +12309.jpg +2733.jpg +816.jpg +8011.jpg +23243.jpg +16526.jpg +27147.jpg +15569.jpg +3596.jpg +19555.jpg +297.jpg +12761.jpg +7122.jpg +25201.jpg +12759.jpg +1930.jpg +10104.jpg +10447.jpg +9899.jpg +14552.jpg +14625.jpg +25723.jpg +12586.jpg +23467.jpg +21828.jpg +22700.jpg +23822.jpg +5944.jpg +27086.jpg +9375.jpg +23748.jpg +12399.jpg +5084.jpg +1161.jpg +9159.jpg +11539.jpg +5212.jpg +154.jpg +20988.jpg +18877.jpg +9749.jpg +9665.jpg +10338.jpg +9146.jpg +12807.jpg +16396.jpg +17981.jpg +6079.jpg +8542.jpg +13671.jpg +2723.jpg +457.jpg +2027.jpg +3975.jpg +18963.jpg +20897.jpg +26211.jpg +21000.jpg +27624.jpg +27216.jpg +9406.jpg +6296.jpg +13757.jpg +13144.jpg +17047.jpg +27621.jpg +4872.jpg +26920.jpg +9980.jpg +26543.jpg +20451.jpg +27412.jpg +27522.jpg +10547.jpg +24450.jpg +28600.jpg +16735.jpg +24747.jpg +3618.jpg +14421.jpg +3261.jpg +28397.jpg +14255.jpg +5070.jpg +2414.jpg +13689.jpg +29843.jpg +12667.jpg +2686.jpg +8835.jpg +27073.jpg +5845.jpg +13742.jpg +18515.jpg +2127.jpg +18196.jpg +25994.jpg +9204.jpg 
+23112.jpg +1864.jpg +10079.jpg +17931.jpg +7051.jpg +3652.jpg +18652.jpg +27682.jpg +9274.jpg +17414.jpg +23054.jpg +7121.jpg +13337.jpg +26480.jpg +12877.jpg +9318.jpg +14572.jpg +26114.jpg +14027.jpg +21035.jpg +17004.jpg +8546.jpg +25501.jpg +21899.jpg +11194.jpg +27957.jpg +16507.jpg +13784.jpg +17599.jpg +23654.jpg +15173.jpg +20964.jpg +7078.jpg +8649.jpg +23567.jpg +29749.jpg +14957.jpg +16056.jpg +13800.jpg +20575.jpg +26984.jpg +755.jpg +2105.jpg +12550.jpg +22051.jpg +8131.jpg +1031.jpg +20506.jpg +14254.jpg +221.jpg +11556.jpg +23225.jpg +25512.jpg +10194.jpg +7238.jpg +16937.jpg +6284.jpg +11682.jpg +5691.jpg +19473.jpg +2772.jpg +26042.jpg +16989.jpg +1273.jpg +3365.jpg +12830.jpg +14976.jpg +8679.jpg +8253.jpg +17093.jpg +7587.jpg +7847.jpg +26105.jpg +5391.jpg +15203.jpg +13512.jpg +12329.jpg +24624.jpg +972.jpg +4663.jpg +12394.jpg +23645.jpg +25011.jpg +23758.jpg +14694.jpg +22548.jpg +28988.jpg +11571.jpg +24117.jpg +13318.jpg +29734.jpg +11129.jpg +5209.jpg +13621.jpg +13275.jpg +17516.jpg +8686.jpg +12059.jpg +5531.jpg +22588.jpg +23712.jpg +18531.jpg +25882.jpg +5314.jpg +2507.jpg +7633.jpg +25245.jpg +18656.jpg +22442.jpg +20509.jpg +5128.jpg +14997.jpg +20579.jpg +2006.jpg +6850.jpg +2300.jpg +6798.jpg +16082.jpg +955.jpg +18026.jpg +10219.jpg +21706.jpg +28284.jpg +6312.jpg +10261.jpg +17393.jpg +2709.jpg +15450.jpg +15479.jpg +12316.jpg +6301.jpg +552.jpg +5808.jpg +9041.jpg +21815.jpg +28328.jpg +19684.jpg +10872.jpg +9972.jpg +26720.jpg +6241.jpg +17888.jpg +21841.jpg +7635.jpg +9606.jpg +29025.jpg +19556.jpg +7179.jpg +38.jpg +18021.jpg +18205.jpg +8674.jpg +9193.jpg +23085.jpg +9662.jpg +25070.jpg +26351.jpg +2374.jpg +9726.jpg +10220.jpg +25835.jpg +22140.jpg +1039.jpg +3900.jpg +16505.jpg +15056.jpg +26373.jpg +9320.jpg +4760.jpg +17813.jpg +2380.jpg +21357.jpg +14182.jpg +4907.jpg +8115.jpg +21763.jpg +18420.jpg +3253.jpg +3771.jpg +3409.jpg +9173.jpg +10900.jpg +24925.jpg +8205.jpg +25128.jpg +13835.jpg +2802.jpg +21077.jpg +28404.jpg +16861.jpg +16882.jpg +1889.jpg +4301.jpg +14753.jpg +25356.jpg +21255.jpg +446.jpg +7483.jpg +19176.jpg +11634.jpg +5362.jpg +13091.jpg +12173.jpg +16092.jpg +1594.jpg +22016.jpg +8513.jpg +18791.jpg +24716.jpg +26388.jpg +3369.jpg +22663.jpg +12570.jpg +13000.jpg +21266.jpg +13213.jpg +28974.jpg +9128.jpg +25921.jpg +2141.jpg +26922.jpg +20740.jpg +13572.jpg +23251.jpg +10303.jpg +4317.jpg +22578.jpg +3548.jpg +29778.jpg +6480.jpg +2465.jpg +16465.jpg +17566.jpg +7357.jpg +4369.jpg +10755.jpg +28570.jpg +14161.jpg +9366.jpg +25237.jpg +18695.jpg +19525.jpg +21787.jpg +11478.jpg +22709.jpg +7414.jpg +17292.jpg +29873.jpg +8138.jpg +13297.jpg +6897.jpg +2810.jpg +1938.jpg +23362.jpg +22678.jpg +14613.jpg +5510.jpg +5484.jpg +16843.jpg +21959.jpg +23123.jpg +9592.jpg +21187.jpg +16289.jpg +9846.jpg +13333.jpg +2751.jpg +10427.jpg +28457.jpg +14945.jpg +21760.jpg +299.jpg +9045.jpg +3139.jpg +21246.jpg +3360.jpg +12062.jpg +13854.jpg +18357.jpg +18341.jpg +26945.jpg +10960.jpg +10686.jpg +18190.jpg +655.jpg +11280.jpg +2556.jpg +24777.jpg +1471.jpg +18528.jpg +25814.jpg +17110.jpg +9209.jpg +23651.jpg +19001.jpg +22689.jpg +28095.jpg +6999.jpg +3292.jpg +613.jpg +29311.jpg +27014.jpg +27547.jpg +16182.jpg +3316.jpg +1325.jpg +22070.jpg +19728.jpg +3990.jpg +9791.jpg +1788.jpg +25563.jpg +14116.jpg +2578.jpg +14390.jpg +29990.jpg +2099.jpg +28461.jpg +7523.jpg +25721.jpg +20544.jpg +22283.jpg +6815.jpg +21833.jpg +24781.jpg +29408.jpg +25367.jpg +28808.jpg +11387.jpg +23096.jpg +23573.jpg +558.jpg +11662.jpg +8082.jpg +7753.jpg 
+16738.jpg +7011.jpg +20074.jpg +7597.jpg +21203.jpg +15610.jpg +9580.jpg +29475.jpg +1169.jpg +24319.jpg +10091.jpg +14505.jpg +20365.jpg +5405.jpg +25819.jpg +27217.jpg +14689.jpg +26929.jpg +14639.jpg +16259.jpg +27592.jpg +20841.jpg +28126.jpg +264.jpg +3481.jpg +25990.jpg +2104.jpg +1919.jpg +11829.jpg +7725.jpg +6461.jpg +19980.jpg +7445.jpg +17462.jpg +23894.jpg +26566.jpg +10153.jpg +4718.jpg +25767.jpg +1428.jpg +11011.jpg +25904.jpg +13181.jpg +15809.jpg +17826.jpg +18750.jpg +13073.jpg +17882.jpg +9336.jpg +28312.jpg +29621.jpg +485.jpg +5372.jpg +26060.jpg +6055.jpg +7794.jpg +17574.jpg +15966.jpg +27359.jpg +24598.jpg +16454.jpg +7581.jpg +8811.jpg +4995.jpg +18876.jpg +27034.jpg +19654.jpg +19321.jpg +20198.jpg +15733.jpg +25546.jpg +7501.jpg +24711.jpg +23733.jpg +10034.jpg +11295.jpg +15089.jpg +12039.jpg +7614.jpg +17057.jpg +26969.jpg +16058.jpg +18106.jpg +25122.jpg +19935.jpg +29829.jpg +29337.jpg +11642.jpg +160.jpg +9383.jpg +20376.jpg +28176.jpg +276.jpg +14159.jpg +5833.jpg +7135.jpg +15817.jpg +15183.jpg +8825.jpg +138.jpg +8893.jpg +29905.jpg +25820.jpg +16474.jpg +1750.jpg +19459.jpg +1272.jpg +2671.jpg +16827.jpg +9695.jpg +16897.jpg +3872.jpg +28082.jpg +11717.jpg +11341.jpg +26527.jpg +1193.jpg +22395.jpg +27345.jpg +15535.jpg +15584.jpg +27754.jpg +9615.jpg +2268.jpg +3213.jpg +8250.jpg +26951.jpg +13567.jpg +19613.jpg +3681.jpg +2016.jpg +16952.jpg +22758.jpg +2490.jpg +13807.jpg +13227.jpg +19148.jpg +21431.jpg +12458.jpg +12530.jpg +28952.jpg +25790.jpg +2322.jpg +14766.jpg +24025.jpg +21080.jpg +8920.jpg +2551.jpg +24839.jpg +21844.jpg +7144.jpg +11672.jpg +1185.jpg +6177.jpg +9707.jpg +8320.jpg +22836.jpg +9950.jpg +10364.jpg +19818.jpg +21486.jpg +26912.jpg +10518.jpg +9002.jpg +6953.jpg +28154.jpg +10717.jpg +28575.jpg +16064.jpg +12512.jpg +12243.jpg +7715.jpg +6080.jpg +24625.jpg +13163.jpg +9547.jpg +27099.jpg +25949.jpg +25137.jpg +28584.jpg +5131.jpg +5317.jpg +18800.jpg +11359.jpg +22897.jpg +12995.jpg +1294.jpg +20634.jpg +19707.jpg +22333.jpg +11775.jpg +5418.jpg +733.jpg +19114.jpg +24536.jpg +8741.jpg +8085.jpg +14812.jpg +20814.jpg +28146.jpg +26899.jpg +29276.jpg +27259.jpg +7375.jpg +28249.jpg +29362.jpg +19638.jpg +21272.jpg +2730.jpg +718.jpg +9839.jpg +15154.jpg +7927.jpg +19694.jpg +11399.jpg +22835.jpg +9626.jpg +12861.jpg +26692.jpg +26793.jpg +14717.jpg +26172.jpg +16753.jpg +12298.jpg +14620.jpg +5735.jpg +17721.jpg +9432.jpg +10378.jpg +14020.jpg +722.jpg +22651.jpg +6781.jpg +23883.jpg +4548.jpg +17970.jpg +5913.jpg +4939.jpg +19825.jpg +5042.jpg +18287.jpg +13100.jpg +305.jpg +11423.jpg +25907.jpg +23353.jpg +1917.jpg +10166.jpg +27886.jpg +28175.jpg +2310.jpg +17762.jpg +741.jpg +7518.jpg +10056.jpg +18137.jpg +17032.jpg +24548.jpg +25294.jpg +1625.jpg +4586.jpg +18653.jpg +39.jpg +13206.jpg +3549.jpg +11410.jpg +19254.jpg +16291.jpg +12737.jpg +9735.jpg +20931.jpg +23031.jpg +29171.jpg +3137.jpg +13903.jpg +25068.jpg +18654.jpg +25003.jpg +26679.jpg +7680.jpg +14395.jpg +25373.jpg +21621.jpg +4971.jpg +22062.jpg +23754.jpg +18606.jpg +17156.jpg +15862.jpg +18058.jpg +19361.jpg +22770.jpg +10251.jpg +18085.jpg +13511.jpg +15018.jpg +15174.jpg +24642.jpg +22949.jpg +8500.jpg +10386.jpg +22948.jpg +21434.jpg +12809.jpg +23228.jpg +6262.jpg +11565.jpg +24012.jpg +2589.jpg +23856.jpg +5699.jpg +4712.jpg +1793.jpg +15920.jpg +20461.jpg +29076.jpg +15486.jpg +18838.jpg +18044.jpg +3420.jpg +17698.jpg +7881.jpg +8032.jpg +7102.jpg +28017.jpg +14176.jpg +21056.jpg +18633.jpg +9887.jpg +2892.jpg +3546.jpg +23548.jpg +10097.jpg 
+17461.jpg +24451.jpg +16586.jpg +11849.jpg +23162.jpg +13640.jpg +2667.jpg +9154.jpg +25371.jpg +9027.jpg +25841.jpg +15330.jpg +13883.jpg +11039.jpg +17137.jpg +20285.jpg +28255.jpg +6657.jpg +19173.jpg +17166.jpg +27828.jpg +1476.jpg +28724.jpg +11561.jpg +16894.jpg +6966.jpg +12503.jpg +17557.jpg +24698.jpg +11201.jpg +11421.jpg +12171.jpg +16628.jpg +3659.jpg +20842.jpg +15221.jpg +1698.jpg +28776.jpg +13347.jpg +19409.jpg +1071.jpg +5427.jpg +11375.jpg +26489.jpg +474.jpg +10732.jpg +25065.jpg +11374.jpg +27439.jpg +21456.jpg +25207.jpg +11166.jpg +20430.jpg +10314.jpg +11140.jpg +800.jpg +1780.jpg +3398.jpg +22024.jpg +25239.jpg +104.jpg +23423.jpg +3929.jpg +12446.jpg +1261.jpg +12354.jpg +6500.jpg +15605.jpg +13456.jpg +11815.jpg +29723.jpg +4524.jpg +9775.jpg +17109.jpg +24861.jpg +15253.jpg +11018.jpg +22420.jpg +11649.jpg +11851.jpg +2334.jpg +23620.jpg +3414.jpg +27178.jpg +994.jpg +5469.jpg +27562.jpg +10989.jpg +17854.jpg +3499.jpg +8144.jpg +12700.jpg +6633.jpg +26034.jpg +6806.jpg +10083.jpg +18217.jpg +13663.jpg +13386.jpg +7716.jpg +23969.jpg +12673.jpg +14582.jpg +11988.jpg +29077.jpg +27301.jpg +21491.jpg +27551.jpg +17003.jpg +26082.jpg +19979.jpg +12610.jpg +24718.jpg +5397.jpg +4218.jpg +3147.jpg +16127.jpg +17437.jpg +1557.jpg +7887.jpg +10931.jpg +6347.jpg +469.jpg +205.jpg +19769.jpg +18469.jpg +18389.jpg +5857.jpg +20642.jpg +15447.jpg +20184.jpg +2717.jpg +17042.jpg +25321.jpg +26822.jpg +12501.jpg +25450.jpg +8143.jpg +21089.jpg +17840.jpg +14562.jpg +19441.jpg +8147.jpg +12636.jpg +24594.jpg +20602.jpg +7146.jpg +14531.jpg +5603.jpg +14459.jpg +7588.jpg +23527.jpg +7912.jpg +5149.jpg +3266.jpg +2382.jpg +20205.jpg +11467.jpg +2703.jpg +14904.jpg +13873.jpg +27955.jpg +25741.jpg +13659.jpg +18808.jpg +1145.jpg +11593.jpg +1965.jpg +22715.jpg +6920.jpg +21157.jpg +17708.jpg +1018.jpg +2351.jpg +27913.jpg +4508.jpg +14270.jpg +19411.jpg +20212.jpg +24191.jpg +10743.jpg +28542.jpg +27733.jpg +23432.jpg +4950.jpg +13052.jpg +6062.jpg +7162.jpg +18450.jpg +9084.jpg +4702.jpg +24958.jpg +3177.jpg +14663.jpg +21700.jpg +13089.jpg +664.jpg +877.jpg +26409.jpg +17804.jpg +26757.jpg +26736.jpg +26329.jpg +26789.jpg +13841.jpg +7149.jpg +21517.jpg +1060.jpg +26713.jpg +27335.jpg +15107.jpg +13725.jpg +9362.jpg +14617.jpg +20943.jpg +35.jpg +1397.jpg +26360.jpg +4741.jpg +11019.jpg +27618.jpg +25172.jpg +14837.jpg +4969.jpg +9982.jpg +20878.jpg +17672.jpg +18348.jpg +10152.jpg +1454.jpg +17568.jpg +17870.jpg +8396.jpg +3716.jpg +10432.jpg +20297.jpg +16045.jpg +27389.jpg +18329.jpg +19170.jpg +28994.jpg +20827.jpg +16754.jpg +4884.jpg +22665.jpg +8749.jpg +22531.jpg +5520.jpg +19698.jpg +6273.jpg +11889.jpg +11047.jpg +25460.jpg +15303.jpg +6034.jpg +18489.jpg +12496.jpg +399.jpg +28086.jpg +29019.jpg +19857.jpg +9669.jpg +8742.jpg +26632.jpg +19687.jpg +8794.jpg +18478.jpg +18014.jpg +6708.jpg +2291.jpg +26309.jpg +24564.jpg +26601.jpg +24337.jpg +23122.jpg +14829.jpg +18574.jpg +27498.jpg +7096.jpg +3672.jpg +11835.jpg +14164.jpg +22725.jpg +22220.jpg +22707.jpg +2899.jpg +12556.jpg +13796.jpg +18338.jpg +16912.jpg +11761.jpg +26419.jpg +555.jpg +7645.jpg +21565.jpg +3763.jpg +28726.jpg +21239.jpg +4424.jpg +27055.jpg +14654.jpg +18836.jpg +28608.jpg +26563.jpg +1795.jpg +28456.jpg +11942.jpg +20587.jpg +22530.jpg +6463.jpg +5054.jpg +25443.jpg +28489.jpg +11643.jpg +6756.jpg +903.jpg +1442.jpg +14163.jpg +17112.jpg +11298.jpg +2550.jpg +17248.jpg +7689.jpg +11340.jpg +2477.jpg +7814.jpg +15470.jpg +19401.jpg +7700.jpg +14281.jpg +24574.jpg +5930.jpg +1162.jpg 
+10677.jpg +1054.jpg +8915.jpg +12224.jpg +3206.jpg +7016.jpg +1168.jpg +13245.jpg +879.jpg +18611.jpg +26733.jpg +17386.jpg +21897.jpg +16074.jpg +12321.jpg +28973.jpg +16036.jpg +7189.jpg +25833.jpg +21928.jpg +12149.jpg +22498.jpg +5552.jpg +14079.jpg +28371.jpg +26070.jpg +10304.jpg +8908.jpg +22194.jpg +26335.jpg +20500.jpg +4767.jpg +3300.jpg +7333.jpg +25989.jpg +24542.jpg +14016.jpg +9601.jpg +21819.jpg +19120.jpg +21660.jpg +14014.jpg +15140.jpg +29345.jpg +4621.jpg +5419.jpg +14576.jpg +910.jpg +16329.jpg +3150.jpg +8343.jpg +5339.jpg +11432.jpg +8436.jpg +26677.jpg +11473.jpg +18997.jpg +13168.jpg +3747.jpg +11810.jpg +23741.jpg +4382.jpg +17266.jpg +12361.jpg +19225.jpg +12050.jpg +17176.jpg +28414.jpg +24309.jpg +6164.jpg +5557.jpg +17622.jpg +11146.jpg +8189.jpg +6473.jpg +20868.jpg +20391.jpg +15143.jpg +15192.jpg +21966.jpg +26128.jpg +10115.jpg +6685.jpg +26618.jpg +20129.jpg +26212.jpg +13631.jpg +20731.jpg +6149.jpg +28799.jpg +13995.jpg +25629.jpg +27407.jpg +24686.jpg +18682.jpg +11557.jpg +23151.jpg +15574.jpg +19211.jpg +8944.jpg +29183.jpg +17889.jpg +1887.jpg +17343.jpg +7253.jpg +17707.jpg +19664.jpg +15897.jpg +15731.jpg +2839.jpg +14668.jpg +8713.jpg +11631.jpg +17123.jpg +25438.jpg +18484.jpg +1252.jpg +4135.jpg +27620.jpg +20560.jpg +16157.jpg +14946.jpg +13983.jpg +26682.jpg +17569.jpg +21581.jpg +17649.jpg +13065.jpg +988.jpg +27334.jpg +11638.jpg +15542.jpg +26762.jpg +12561.jpg +22610.jpg +7496.jpg +23386.jpg +6020.jpg +12892.jpg +6180.jpg +5762.jpg +10710.jpg +3728.jpg +10817.jpg +26502.jpg +2365.jpg +7272.jpg +385.jpg +13187.jpg +16909.jpg +2371.jpg +6094.jpg +21590.jpg +20837.jpg +28445.jpg +5968.jpg +25630.jpg +406.jpg +29709.jpg +2512.jpg +23268.jpg +8456.jpg +25698.jpg +19817.jpg +24847.jpg +11841.jpg +3991.jpg +19431.jpg +12215.jpg +9222.jpg +16670.jpg +24533.jpg +19802.jpg +21639.jpg +25995.jpg +17528.jpg +20164.jpg +17278.jpg +16803.jpg +6642.jpg +19573.jpg +4590.jpg +19298.jpg +25673.jpg +19995.jpg +25998.jpg +4653.jpg +29986.jpg +8181.jpg +15077.jpg +1984.jpg +13920.jpg +19044.jpg +1991.jpg +6395.jpg +27770.jpg +274.jpg +18029.jpg +6629.jpg +15951.jpg +8035.jpg +21387.jpg +4787.jpg +12990.jpg +583.jpg +17354.jpg +14120.jpg +14095.jpg +13336.jpg +17769.jpg +6626.jpg +24741.jpg +26565.jpg +16862.jpg +6564.jpg +29861.jpg +7734.jpg +13066.jpg +1766.jpg +7287.jpg +3973.jpg +12070.jpg +27801.jpg +25624.jpg +25277.jpg +10549.jpg +163.jpg +27221.jpg +14558.jpg +3088.jpg +10040.jpg +23528.jpg +23142.jpg +13546.jpg +28596.jpg +2469.jpg +29326.jpg +4436.jpg +15714.jpg +24151.jpg +27808.jpg +29342.jpg +12733.jpg +13745.jpg +5321.jpg +22849.jpg +600.jpg +5862.jpg +28117.jpg +28078.jpg +1904.jpg +11.jpg +27451.jpg +27025.jpg +29746.jpg +23.jpg +4556.jpg +26796.jpg +688.jpg +27035.jpg +6967.jpg +24823.jpg +18043.jpg +9616.jpg +16416.jpg +21618.jpg +11871.jpg +19841.jpg +20806.jpg +17577.jpg +10172.jpg +8251.jpg +14050.jpg +27039.jpg +13471.jpg +16494.jpg +23992.jpg +7298.jpg +25129.jpg +2149.jpg +23013.jpg +3792.jpg +1376.jpg +26101.jpg +11890.jpg +18143.jpg +9941.jpg +12800.jpg +13584.jpg +5244.jpg +18107.jpg +2359.jpg +2683.jpg +7651.jpg +6858.jpg +22074.jpg +14000.jpg +11393.jpg +4750.jpg +15874.jpg +24484.jpg +10417.jpg +19871.jpg +20022.jpg +3555.jpg +3025.jpg +10297.jpg +20572.jpg +25533.jpg +21935.jpg +13138.jpg +23108.jpg +24381.jpg +2906.jpg +3797.jpg +25283.jpg +15118.jpg +11559.jpg +3289.jpg +8868.jpg +6389.jpg +12310.jpg +4915.jpg +29891.jpg +8093.jpg +6251.jpg +17573.jpg +28772.jpg +23485.jpg +1383.jpg +22496.jpg +13294.jpg +11189.jpg +29391.jpg 
+6923.jpg +20614.jpg +24579.jpg +25999.jpg +26343.jpg +1072.jpg +19752.jpg +27565.jpg +12134.jpg +25436.jpg +19187.jpg +5673.jpg +14174.jpg +15338.jpg +28391.jpg +29065.jpg +16837.jpg +24591.jpg +26616.jpg +11014.jpg +11104.jpg +16021.jpg +7692.jpg +19087.jpg +28571.jpg +29817.jpg +6011.jpg +621.jpg +20974.jpg +25909.jpg +2728.jpg +3528.jpg +20086.jpg +3703.jpg +1230.jpg +3998.jpg +26935.jpg +5090.jpg +12151.jpg +21794.jpg +20942.jpg +3482.jpg +8341.jpg +10622.jpg +24296.jpg +25565.jpg +7435.jpg +5107.jpg +21008.jpg +1360.jpg +2511.jpg +6120.jpg +6292.jpg +22204.jpg +17428.jpg +26909.jpg +17000.jpg +3633.jpg +999.jpg +934.jpg +3662.jpg +2383.jpg +20315.jpg +20053.jpg +24302.jpg +10813.jpg +18074.jpg +2798.jpg +23864.jpg +4881.jpg +29762.jpg +8558.jpg +3639.jpg +15132.jpg +9058.jpg +10921.jpg +23554.jpg +15096.jpg +29365.jpg +27062.jpg +13050.jpg +4516.jpg +3462.jpg +2983.jpg +25238.jpg +6413.jpg +23960.jpg +4678.jpg +7227.jpg +13548.jpg +27394.jpg +22646.jpg +1782.jpg +9740.jpg +26938.jpg +21750.jpg +12672.jpg +7561.jpg +25232.jpg +22986.jpg +11595.jpg +19639.jpg +9765.jpg +16060.jpg +17815.jpg +13726.jpg +18246.jpg +18334.jpg +26075.jpg +323.jpg +19528.jpg +26828.jpg +7474.jpg +4127.jpg +3393.jpg +28508.jpg +8256.jpg +16975.jpg +10626.jpg +248.jpg +706.jpg +26749.jpg +10672.jpg +10692.jpg +17187.jpg +7493.jpg +20822.jpg +16387.jpg +4640.jpg +22608.jpg +27206.jpg +4454.jpg +23701.jpg +10476.jpg +28549.jpg +3544.jpg +445.jpg +8314.jpg +9522.jpg +25568.jpg +26619.jpg +27944.jpg +2594.jpg +16873.jpg +10786.jpg +26294.jpg +18136.jpg +9267.jpg +2073.jpg +662.jpg +21111.jpg +28493.jpg +5738.jpg +2401.jpg +12308.jpg +8455.jpg +23985.jpg +18631.jpg +25513.jpg +15759.jpg +8118.jpg +18251.jpg +10295.jpg +27275.jpg +22380.jpg +24404.jpg +14037.jpg +8806.jpg +24286.jpg +15395.jpg +4396.jpg +4483.jpg +29542.jpg +25977.jpg +16842.jpg +9499.jpg +22907.jpg +20588.jpg +20221.jpg +23231.jpg +23915.jpg +25917.jpg +21049.jpg +1242.jpg +6890.jpg +1296.jpg +19938.jpg +17890.jpg +23962.jpg +4815.jpg +17116.jpg +19672.jpg +8977.jpg +99.jpg +19534.jpg +15551.jpg +19514.jpg +22587.jpg +28125.jpg +22127.jpg +26650.jpg +2421.jpg +22872.jpg +17217.jpg +26450.jpg +23492.jpg +28999.jpg +21475.jpg +20477.jpg +14911.jpg +19064.jpg +11217.jpg +3630.jpg +26831.jpg +21715.jpg +27978.jpg +10347.jpg +21390.jpg +17289.jpg +20889.jpg +6538.jpg +15040.jpg +8918.jpg +14025.jpg +21353.jpg +20253.jpg +13527.jpg +23384.jpg +29517.jpg +23302.jpg +23813.jpg +25532.jpg +26376.jpg +29451.jpg +17627.jpg +8476.jpg +15621.jpg +1696.jpg +2627.jpg +16826.jpg +15749.jpg +3336.jpg +7682.jpg +1171.jpg +7031.jpg +12130.jpg +19247.jpg +24091.jpg +12285.jpg +3534.jpg +29212.jpg +17340.jpg +11179.jpg +20316.jpg +21712.jpg +1313.jpg +6091.jpg +3262.jpg +8099.jpg +18335.jpg +28529.jpg +11767.jpg +17005.jpg +905.jpg +11679.jpg +24910.jpg +20044.jpg +4465.jpg +1878.jpg +1479.jpg +13886.jpg +11850.jpg +6662.jpg +3827.jpg +25671.jpg +27094.jpg +11306.jpg +27218.jpg +22975.jpg +11786.jpg +21696.jpg +21524.jpg +24083.jpg +2369.jpg +2198.jpg +21225.jpg +22832.jpg +10449.jpg +16675.jpg +15099.jpg +9177.jpg +29219.jpg +10110.jpg +1499.jpg +14555.jpg +20115.jpg +20702.jpg +16838.jpg +4694.jpg +20750.jpg +19010.jpg +20005.jpg +23769.jpg +14137.jpg +4785.jpg +1216.jpg +1307.jpg +15339.jpg +12276.jpg +7790.jpg +4258.jpg +8457.jpg +23230.jpg +14061.jpg +19383.jpg +25270.jpg +3725.jpg +6095.jpg +27332.jpg +1918.jpg +22098.jpg +4474.jpg +1978.jpg +5182.jpg +593.jpg +19977.jpg +13020.jpg +9978.jpg +8637.jpg +3021.jpg +686.jpg +11305.jpg +6832.jpg +917.jpg +11154.jpg 
+27377.jpg +651.jpg +12006.jpg +7622.jpg +3707.jpg +5417.jpg +18968.jpg +1773.jpg +919.jpg +8980.jpg +27088.jpg +15505.jpg +7383.jpg +9373.jpg +15530.jpg +1282.jpg +26292.jpg +23923.jpg +29997.jpg +3402.jpg +11962.jpg +8113.jpg +12998.jpg +28059.jpg +7025.jpg +13366.jpg +23380.jpg +24998.jpg +11183.jpg +5141.jpg +18066.jpg +12360.jpg +22386.jpg +25132.jpg +7088.jpg +480.jpg +3186.jpg +499.jpg +8098.jpg +23265.jpg +3888.jpg +18748.jpg +16085.jpg +19070.jpg +24213.jpg +15017.jpg +12491.jpg +29200.jpg +9435.jpg +7352.jpg +14312.jpg +7323.jpg +18112.jpg +10467.jpg +8039.jpg +14363.jpg +8268.jpg +16321.jpg +1926.jpg +12685.jpg +4233.jpg +2448.jpg +29897.jpg +28893.jpg +1619.jpg +26259.jpg +20007.jpg +25677.jpg +8114.jpg +21039.jpg +13588.jpg +21442.jpg +23364.jpg +26240.jpg +15427.jpg +4030.jpg +10425.jpg +27927.jpg +20262.jpg +28729.jpg +8247.jpg +12274.jpg +28921.jpg +16191.jpg +15972.jpg +1571.jpg +5032.jpg +25991.jpg +9281.jpg +7416.jpg +976.jpg +24258.jpg +26748.jpg +12804.jpg +5376.jpg +4045.jpg +12093.jpg +10507.jpg +4266.jpg +23666.jpg +25216.jpg +6544.jpg +22818.jpg +22941.jpg +22426.jpg +13282.jpg +16177.jpg +9286.jpg +28986.jpg +25894.jpg +18584.jpg +4297.jpg +28346.jpg +23149.jpg +16243.jpg +14248.jpg +24732.jpg +7510.jpg +7829.jpg +22419.jpg +12237.jpg +4203.jpg +9273.jpg +11612.jpg +13011.jpg +1178.jpg +21613.jpg +25013.jpg +27915.jpg +20257.jpg +22819.jpg +13288.jpg +26326.jpg +25468.jpg +13857.jpg +2269.jpg +18571.jpg +7671.jpg +17590.jpg +25683.jpg +22391.jpg +15800.jpg +6571.jpg +13334.jpg +4714.jpg +8665.jpg +6523.jpg +8796.jpg +8948.jpg +3039.jpg +29418.jpg +14651.jpg +1789.jpg +2884.jpg +27344.jpg +3362.jpg +6916.jpg +27038.jpg +14180.jpg +26846.jpg +4181.jpg +28627.jpg +2743.jpg +1392.jpg +8843.jpg +27026.jpg +906.jpg +2111.jpg +9310.jpg +20554.jpg +12702.jpg +5655.jpg +2980.jpg +24553.jpg +25975.jpg +28464.jpg +11205.jpg +9291.jpg +3172.jpg +25161.jpg +15518.jpg +12625.jpg +28314.jpg +27616.jpg +29049.jpg +19172.jpg +645.jpg +13694.jpg +27486.jpg +6784.jpg +22452.jpg +7200.jpg +886.jpg +5809.jpg +182.jpg +24703.jpg +15209.jpg +29084.jpg +29468.jpg +26689.jpg +5277.jpg +20003.jpg +29052.jpg +5163.jpg +9256.jpg +6140.jpg +8819.jpg +28141.jpg +28684.jpg +18844.jpg +6424.jpg +7198.jpg +26662.jpg +13504.jpg +10613.jpg +26054.jpg +28738.jpg +9353.jpg +22486.jpg +29899.jpg +18167.jpg +6907.jpg +17152.jpg +24776.jpg +17525.jpg +3609.jpg +14478.jpg +15449.jpg +22726.jpg +11056.jpg +16480.jpg +15363.jpg +26599.jpg +10751.jpg +16189.jpg +18055.jpg +9995.jpg +13731.jpg +6547.jpg +17694.jpg +12414.jpg +16146.jpg +15127.jpg +5461.jpg +8579.jpg +13794.jpg +16982.jpg +2204.jpg +16305.jpg +1730.jpg +8990.jpg +28439.jpg +20603.jpg +8158.jpg +4338.jpg +24828.jpg +12114.jpg +10132.jpg +25219.jpg +728.jpg +13032.jpg +923.jpg +21965.jpg +29004.jpg +27897.jpg +14637.jpg +21512.jpg +29806.jpg +8851.jpg +29652.jpg +28088.jpg +21733.jpg +1035.jpg +14062.jpg +25139.jpg +12221.jpg +23203.jpg +29776.jpg +18282.jpg +16385.jpg +28308.jpg +10519.jpg +25204.jpg +20492.jpg +7897.jpg +13242.jpg +14681.jpg +3284.jpg +12540.jpg +19905.jpg +12449.jpg +6139.jpg +19095.jpg +6636.jpg +26751.jpg +19050.jpg +25315.jpg +22964.jpg +9939.jpg +22314.jpg +16205.jpg +11153.jpg +29689.jpg +19291.jpg +4555.jpg +17498.jpg +21609.jpg +8625.jpg +4952.jpg +15599.jpg +4503.jpg +28045.jpg +13607.jpg +9960.jpg +20902.jpg +26416.jpg +21043.jpg +8853.jpg +26200.jpg +27414.jpg +12895.jpg +19049.jpg +4292.jpg +11911.jpg +11100.jpg +25177.jpg +5682.jpg +951.jpg +12234.jpg +12756.jpg +16190.jpg +24911.jpg +4077.jpg +22056.jpg 
+8874.jpg +8668.jpg +7168.jpg +28245.jpg +10703.jpg +12602.jpg +27193.jpg +3200.jpg +10092.jpg +17190.jpg +17950.jpg +16619.jpg +24153.jpg +27143.jpg +8818.jpg +15230.jpg +6285.jpg +29782.jpg +597.jpg +3518.jpg +7420.jpg +20960.jpg +16183.jpg +10969.jpg +23862.jpg +9865.jpg +25220.jpg +13415.jpg +18519.jpg +4716.jpg +27124.jpg +18250.jpg +22453.jpg +6430.jpg +11916.jpg +6773.jpg +19372.jpg +210.jpg +18960.jpg +15452.jpg +887.jpg +28929.jpg +14942.jpg +21547.jpg +12246.jpg +20321.jpg +1229.jpg +25605.jpg +5204.jpg +18343.jpg +25421.jpg +9934.jpg +12253.jpg +5058.jpg +22586.jpg +8321.jpg +2951.jpg +16256.jpg +11801.jpg +27574.jpg +11138.jpg +2761.jpg +23463.jpg +17561.jpg +17012.jpg +11730.jpg +12413.jpg +16571.jpg +16390.jpg +1538.jpg +12898.jpg +15385.jpg +25383.jpg +14622.jpg +12801.jpg +12145.jpg +19644.jpg +27005.jpg +1959.jpg +7495.jpg +29207.jpg +6118.jpg +1455.jpg +6308.jpg +14141.jpg +21264.jpg +5811.jpg +9614.jpg +28092.jpg +10890.jpg +25868.jpg +4347.jpg +18988.jpg +12484.jpg +1503.jpg +3486.jpg +6162.jpg +5543.jpg +15836.jpg +23242.jpg +12565.jpg +472.jpg +3675.jpg +3319.jpg +9881.jpg +26037.jpg +25304.jpg +18019.jpg +11267.jpg +9003.jpg +11209.jpg +25838.jpg +3997.jpg +5578.jpg +8614.jpg +1399.jpg +22823.jpg +8383.jpg +28771.jpg +1284.jpg +22962.jpg +24705.jpg +3170.jpg +2685.jpg +13202.jpg +4435.jpg +7084.jpg +4311.jpg +10001.jpg +11175.jpg +26162.jpg +6939.jpg +19696.jpg +5856.jpg +25166.jpg +23237.jpg +20189.jpg +20565.jpg +29661.jpg +3196.jpg +18211.jpg +5185.jpg +2218.jpg +19922.jpg +22686.jpg +29507.jpg +14608.jpg +9342.jpg +2962.jpg +20507.jpg +12510.jpg +26526.jpg +10766.jpg +26187.jpg +25208.jpg +8567.jpg +3718.jpg +18157.jpg +6824.jpg +15824.jpg +24351.jpg +29230.jpg +20570.jpg +16046.jpg +8911.jpg +3354.jpg +3773.jpg +15895.jpg +11827.jpg +7748.jpg +17511.jpg +5997.jpg +12127.jpg +59.jpg +3203.jpg +13383.jpg +23625.jpg +21330.jpg +24291.jpg +29412.jpg +22470.jpg +13462.jpg +5116.jpg +1300.jpg +22446.jpg +7629.jpg +18514.jpg +14642.jpg +2349.jpg +283.jpg +17055.jpg +29476.jpg +5685.jpg +24558.jpg +11171.jpg +18949.jpg +28151.jpg +26860.jpg +20435.jpg +19721.jpg +23759.jpg +16233.jpg +22306.jpg +27318.jpg +16822.jpg +18930.jpg +27446.jpg +27233.jpg +20248.jpg +13442.jpg +17157.jpg +19730.jpg +20657.jpg +2293.jpg +28098.jpg +596.jpg +25505.jpg +17133.jpg +25664.jpg +4196.jpg +29789.jpg +12105.jpg +13476.jpg +29037.jpg +21711.jpg +8552.jpg +21876.jpg +25879.jpg +7562.jpg +6044.jpg +465.jpg +17759.jpg +15782.jpg +6924.jpg +2118.jpg +1568.jpg +21210.jpg +29600.jpg +3184.jpg +2978.jpg +27267.jpg +16534.jpg +11960.jpg +27373.jpg +805.jpg +23399.jpg +7625.jpg +29302.jpg +26635.jpg +29066.jpg +23799.jpg +8656.jpg +15896.jpg +15284.jpg +13251.jpg +10103.jpg +14431.jpg +19000.jpg +5138.jpg +29032.jpg +11238.jpg +28203.jpg +13326.jpg +27456.jpg +13686.jpg +2306.jpg +11449.jpg +11457.jpg +17230.jpg +10020.jpg +5041.jpg +29424.jpg +4860.jpg +19427.jpg +5958.jpg +7718.jpg +11691.jpg +20133.jpg +18497.jpg +11575.jpg +22511.jpg +1638.jpg +24788.jpg +16655.jpg +3494.jpg +5960.jpg +27271.jpg +12165.jpg +7533.jpg +5545.jpg +26106.jpg +29722.jpg +25058.jpg +20424.jpg +28497.jpg +22084.jpg +3520.jpg +20330.jpg +21468.jpg +14361.jpg +25377.jpg +21743.jpg +26206.jpg +11109.jpg +22940.jpg +19180.jpg +16605.jpg +8179.jpg +3928.jpg +19775.jpg +7815.jpg +1985.jpg +19219.jpg +9223.jpg +12356.jpg +5033.jpg +14565.jpg +25370.jpg +19765.jpg +24035.jpg +11176.jpg +10348.jpg +10780.jpg +9192.jpg +22966.jpg +28822.jpg +16280.jpg +5961.jpg +24007.jpg +16005.jpg +5377.jpg +26816.jpg +2602.jpg +2462.jpg 
+21657.jpg +20516.jpg +12397.jpg +14789.jpg +8854.jpg +5635.jpg +19524.jpg +19759.jpg +10721.jpg +29971.jpg +13267.jpg +26429.jpg +17954.jpg +17084.jpg +22810.jpg +22344.jpg +2134.jpg +15855.jpg +9649.jpg +3187.jpg +13489.jpg +10111.jpg +2606.jpg +16094.jpg +13078.jpg +4288.jpg +27458.jpg +2985.jpg +17469.jpg +22896.jpg +7026.jpg +26832.jpg +11315.jpg +27830.jpg +27244.jpg +1740.jpg +20160.jpg +12989.jpg +1356.jpg +28430.jpg +11308.jpg +5456.jpg +10808.jpg +18990.jpg +24232.jpg +17996.jpg +13782.jpg +1154.jpg +7637.jpg +18921.jpg +11287.jpg +6277.jpg +18596.jpg +12137.jpg +9060.jpg +21386.jpg +28816.jpg +2631.jpg +15319.jpg +27798.jpg +20229.jpg +27488.jpg +8270.jpg +21366.jpg +3726.jpg +11285.jpg +22168.jpg +7728.jpg +23131.jpg +8235.jpg +25954.jpg +19451.jpg +8906.jpg +26198.jpg +25150.jpg +12213.jpg +16780.jpg +18309.jpg +4096.jpg +25811.jpg +7176.jpg +24902.jpg +12226.jpg +20962.jpg +25736.jpg +23311.jpg +9053.jpg +12057.jpg +27594.jpg +3526.jpg +9943.jpg +2577.jpg +24768.jpg +754.jpg +10406.jpg +3724.jpg +26574.jpg +17117.jpg +24657.jpg +7898.jpg +11606.jpg +5375.jpg +3889.jpg +28736.jpg +25061.jpg +26923.jpg +2777.jpg +21188.jpg +2262.jpg +17141.jpg +3868.jpg +24225.jpg +867.jpg +16248.jpg +21496.jpg +2097.jpg +8372.jpg +5500.jpg +19646.jpg +28695.jpg +5772.jpg +23121.jpg +2641.jpg +19888.jpg +25617.jpg +19968.jpg +21822.jpg +29085.jpg +27647.jpg +19204.jpg +17374.jpg +26613.jpg +11044.jpg +938.jpg +6822.jpg +20795.jpg +25680.jpg +12543.jpg +26090.jpg +5192.jpg +7980.jpg +27910.jpg +22360.jpg +6955.jpg +20283.jpg +28150.jpg +20805.jpg +8243.jpg +9488.jpg +13570.jpg +8849.jpg +27067.jpg +10192.jpg +24632.jpg +28206.jpg +19768.jpg +6083.jpg +12197.jpg +22160.jpg +28172.jpg +6192.jpg +20196.jpg +16782.jpg +5836.jpg +5232.jpg +3258.jpg +25745.jpg +23487.jpg +22906.jpg +8690.jpg +4068.jpg +15615.jpg +495.jpg +9368.jpg +8738.jpg +21437.jpg +21417.jpg +5366.jpg +974.jpg +4322.jpg +25439.jpg +17422.jpg +22617.jpg +6330.jpg +9393.jpg +5280.jpg +23533.jpg +18120.jpg +10610.jpg +11325.jpg +29838.jpg +19.jpg +22256.jpg +20650.jpg +2757.jpg +21827.jpg +24667.jpg +3932.jpg +10514.jpg +15066.jpg +20924.jpg +3582.jpg +24633.jpg +27211.jpg +29047.jpg +11976.jpg +18804.jpg +25696.jpg +26771.jpg +23036.jpg +9015.jpg +18377.jpg +4655.jpg +22015.jpg +16761.jpg +15150.jpg +1729.jpg +18.jpg +16026.jpg +29705.jpg +26288.jpg +19308.jpg +4142.jpg +10249.jpg +4762.jpg +15481.jpg +5948.jpg +24892.jpg +23352.jpg +19549.jpg +1221.jpg +22594.jpg +11113.jpg +24596.jpg +20701.jpg +2054.jpg +3684.jpg +4173.jpg +21598.jpg +9297.jpg +29164.jpg +21627.jpg +23140.jpg +28.jpg +15758.jpg +23099.jpg +5493.jpg +11281.jpg +3833.jpg +3042.jpg +9591.jpg +3314.jpg +12999.jpg +21059.jpg +13924.jpg +96.jpg +28881.jpg +6988.jpg +2474.jpg +15000.jpg +21324.jpg +5776.jpg +20890.jpg +16893.jpg +4953.jpg +25022.jpg +26458.jpg +27760.jpg +15633.jpg +18212.jpg +6628.jpg +18834.jpg +13190.jpg +16725.jpg +10298.jpg +27304.jpg +4040.jpg +10521.jpg +26432.jpg +25508.jpg +3960.jpg +80.jpg +12901.jpg +10138.jpg +21887.jpg +4046.jpg +21624.jpg +14019.jpg +12147.jpg +2990.jpg +20813.jpg +13633.jpg +14633.jpg +13923.jpg +4723.jpg +5088.jpg +27996.jpg +15439.jpg +6880.jpg +1630.jpg +12850.jpg +12688.jpg +9460.jpg +1657.jpg +10943.jpg +259.jpg +15525.jpg +18042.jpg +11162.jpg +9269.jpg +3938.jpg +22134.jpg +1767.jpg +5399.jpg +10791.jpg +27102.jpg +5205.jpg +25758.jpg +14514.jpg +25190.jpg +311.jpg +12876.jpg +7321.jpg +29416.jpg +2261.jpg +1577.jpg +8896.jpg +11545.jpg +14711.jpg +22284.jpg +16143.jpg +16406.jpg +12303.jpg +11390.jpg +6265.jpg 
+5874.jpg +19056.jpg +3476.jpg +24365.jpg +15264.jpg +24508.jpg +8357.jpg +1367.jpg +17644.jpg +25180.jpg +20948.jpg +15117.jpg +28589.jpg +21259.jpg +27057.jpg +20239.jpg +6367.jpg +5528.jpg +14739.jpg +6160.jpg +8760.jpg +17214.jpg +9421.jpg +15466.jpg +8263.jpg +17743.jpg +26615.jpg +21110.jpg +25305.jpg +28938.jpg +26056.jpg +23229.jpg +7438.jpg +11616.jpg +2100.jpg +18725.jpg +159.jpg +20123.jpg +9784.jpg +28042.jpg +6589.jpg +19885.jpg +16445.jpg +9304.jpg +11697.jpg +28949.jpg +23626.jpg +25488.jpg +11622.jpg +1999.jpg +1498.jpg +3558.jpg +2904.jpg +11898.jpg +6887.jpg +9242.jpg +16864.jpg +12193.jpg +24386.jpg +20484.jpg +16540.jpg +22069.jpg +2967.jpg +8761.jpg +24907.jpg +14830.jpg +14076.jpg +18992.jpg +8008.jpg +24283.jpg +3942.jpg +25557.jpg +19038.jpg +5126.jpg +14653.jpg +10632.jpg +26079.jpg +6131.jpg +27753.jpg +27151.jpg +8979.jpg +24096.jpg +15831.jpg +9096.jpg +14030.jpg +962.jpg +17002.jpg +18229.jpg +12376.jpg +28755.jpg +23975.jpg +25840.jpg +6980.jpg +12773.jpg +2668.jpg +2850.jpg +4978.jpg +17749.jpg +7461.jpg +25554.jpg +1119.jpg +19670.jpg +16954.jpg +27431.jpg +23296.jpg +25504.jpg +2177.jpg +19126.jpg +8459.jpg +13355.jpg +11573.jpg +21345.jpg +15843.jpg +13204.jpg +20174.jpg +26961.jpg +27327.jpg +19725.jpg +7676.jpg +507.jpg +27006.jpg +17519.jpg +29798.jpg +17522.jpg +18172.jpg +702.jpg +23632.jpg +19878.jpg +24318.jpg +17650.jpg +21949.jpg +1408.jpg +27603.jpg +29279.jpg +13712.jpg +4557.jpg +23169.jpg +18447.jpg +15596.jpg +6315.jpg +14185.jpg +17288.jpg +11327.jpg +28401.jpg +386.jpg +684.jpg +20051.jpg +14267.jpg +196.jpg +18795.jpg +22616.jpg +1045.jpg +26176.jpg +11554.jpg +13943.jpg +19351.jpg +7740.jpg +3650.jpg +23719.jpg +17892.jpg +21270.jpg +14779.jpg +25282.jpg +14295.jpg +22078.jpg +18330.jpg +24814.jpg +27107.jpg +7351.jpg +16443.jpg +918.jpg +8153.jpg +10773.jpg +16293.jpg +27555.jpg +2341.jpg +15104.jpg +27678.jpg +16296.jpg +1836.jpg +19275.jpg +8260.jpg +19388.jpg +20851.jpg +4967.jpg +5972.jpg +23173.jpg +14087.jpg +20420.jpg +29434.jpg +21916.jpg +15482.jpg +10707.jpg +18699.jpg +10296.jpg +12786.jpg +25940.jpg +20380.jpg +20885.jpg +8680.jpg +26578.jpg +22040.jpg +26752.jpg +25226.jpg +4250.jpg +2165.jpg +5749.jpg +27977.jpg +29985.jpg +4360.jpg +27403.jpg +4688.jpg +21102.jpg +3464.jpg +796.jpg +6498.jpg +3345.jpg +29115.jpg +13475.jpg +1179.jpg +26228.jpg +29182.jpg +5485.jpg +18662.jpg +25033.jpg +18796.jpg +12599.jpg +22494.jpg +24396.jpg +3646.jpg +14715.jpg +18847.jpg +25714.jpg +15982.jpg +921.jpg +25679.jpg +11480.jpg +16726.jpg +6254.jpg +23010.jpg +27773.jpg +23655.jpg +15102.jpg +407.jpg +16926.jpg +25747.jpg +1215.jpg +8277.jpg +8018.jpg +11241.jpg +10387.jpg +10877.jpg +20008.jpg +2081.jpg +9940.jpg +24712.jpg +2265.jpg +19260.jpg +16038.jpg +26581.jpg +22767.jpg +5959.jpg +3699.jpg +3826.jpg +2711.jpg +12363.jpg +14430.jpg +5174.jpg +14253.jpg +21818.jpg +7171.jpg +5147.jpg +6680.jpg +20170.jpg +24196.jpg +14339.jpg +23224.jpg +13079.jpg +2545.jpg +3862.jpg +11542.jpg +9554.jpg +6057.jpg +12177.jpg +28733.jpg +17765.jpg +29931.jpg +9447.jpg +20247.jpg +19572.jpg +19597.jpg +24184.jpg +17030.jpg +25408.jpg +8307.jpg +25964.jpg +14232.jpg +9217.jpg +27060.jpg +3597.jpg +4707.jpg +6406.jpg +2634.jpg +7343.jpg +10961.jpg +21191.jpg +10802.jpg +9080.jpg +8937.jpg +29874.jpg +17639.jpg +28960.jpg +6368.jpg +12854.jpg +1675.jpg +29537.jpg +8266.jpg +13025.jpg +27053.jpg +2698.jpg +20429.jpg +16695.jpg +21317.jpg +24395.jpg +22059.jpg +8975.jpg +14917.jpg +16769.jpg +4223.jpg +3135.jpg +17955.jpg +6222.jpg +9878.jpg +15490.jpg 
+4037.jpg +15315.jpg +6214.jpg +27597.jpg +13783.jpg +18207.jpg +18658.jpg +17311.jpg +2645.jpg +306.jpg +21325.jpg +23359.jpg +1515.jpg +19281.jpg +13855.jpg +19358.jpg +12630.jpg +2190.jpg +24733.jpg +27047.jpg +26175.jpg +10620.jpg +13988.jpg +12671.jpg +14822.jpg +7327.jpg +2811.jpg +12866.jpg +17221.jpg +8717.jpg +7329.jpg +9850.jpg +12795.jpg +20683.jpg +3736.jpg +29340.jpg +4942.jpg +17884.jpg +25316.jpg +6252.jpg +15739.jpg +16539.jpg +29455.jpg +2948.jpg +12652.jpg +6840.jpg +369.jpg +1342.jpg +13718.jpg +7784.jpg +27042.jpg +22765.jpg +29243.jpg +10628.jpg +15734.jpg +9830.jpg +5626.jpg +22008.jpg +18665.jpg +19678.jpg +9819.jpg +26399.jpg +28011.jpg +5846.jpg +27506.jpg +4177.jpg +4039.jpg +10895.jpg +6786.jpg +25028.jpg +25880.jpg +26625.jpg +8173.jpg +21356.jpg +840.jpg +7431.jpg +21439.jpg +18259.jpg +20049.jpg +4820.jpg +6752.jpg +17088.jpg +26746.jpg +958.jpg +3617.jpg +11151.jpg +27076.jpg +11184.jpg +25094.jpg +11198.jpg +18809.jpg +13878.jpg +19619.jpg +25456.jpg +20154.jpg +13985.jpg +18517.jpg +7745.jpg +16487.jpg +13341.jpg +19919.jpg +5916.jpg +5354.jpg +14474.jpg +23513.jpg +11232.jpg +23261.jpg +25752.jpg +28007.jpg +5953.jpg +23821.jpg +12272.jpg +19086.jpg +20695.jpg +27101.jpg +3666.jpg +28103.jpg +6830.jpg +3342.jpg +14884.jpg +23753.jpg +25579.jpg +5697.jpg +3914.jpg +9170.jpg +22483.jpg +3507.jpg +1613.jpg +6674.jpg +28965.jpg +23394.jpg +14600.jpg +9922.jpg +15242.jpg +8512.jpg +29379.jpg +16960.jpg +25087.jpg +14497.jpg +2974.jpg +16934.jpg +24915.jpg +4073.jpg +13063.jpg +12392.jpg +25669.jpg +14217.jpg +29210.jpg +28528.jpg +29781.jpg +7177.jpg +19665.jpg +17796.jpg +10014.jpg +20754.jpg +16687.jpg +19341.jpg +21632.jpg +3015.jpg +15295.jpg +25118.jpg +12294.jpg +7338.jpg +18366.jpg +347.jpg +25956.jpg +835.jpg +14366.jpg +25335.jpg +15007.jpg +17936.jpg +20094.jpg +24912.jpg +17215.jpg +15847.jpg +17643.jpg +1207.jpg +14132.jpg +13902.jpg +9827.jpg +22436.jpg +4529.jpg +1494.jpg +14110.jpg +6451.jpg +23078.jpg +18661.jpg +3421.jpg +3320.jpg +7279.jpg +24154.jpg +11549.jpg +3993.jpg +18410.jpg +24980.jpg +13266.jpg +682.jpg +5356.jpg +20067.jpg +16683.jpg +6506.jpg +25018.jpg +3539.jpg +3254.jpg +1712.jpg +21034.jpg +26057.jpg +26916.jpg +13737.jpg +26457.jpg +28900.jpg +9904.jpg +21004.jpg +29442.jpg +1099.jpg +14612.jpg +24806.jpg +27791.jpg +343.jpg +26895.jpg +14325.jpg +2460.jpg +17693.jpg +14457.jpg +7722.jpg +22375.jpg +17908.jpg +22025.jpg +9510.jpg +17596.jpg +13488.jpg +13271.jpg +2552.jpg +27675.jpg +9408.jpg +10180.jpg +1155.jpg +2408.jpg +21355.jpg +11360.jpg +21784.jpg +23349.jpg +21530.jpg +26131.jpg +15776.jpg +1245.jpg +16303.jpg +2360.jpg +21241.jpg +29974.jpg +27361.jpg +21645.jpg +24429.jpg +24803.jpg +16755.jpg +4695.jpg +14930.jpg +23188.jpg +28466.jpg +25351.jpg +16420.jpg +5569.jpg +29549.jpg +2390.jpg +16353.jpg +7391.jpg +1960.jpg +14756.jpg +24737.jpg +11838.jpg +758.jpg +24477.jpg +25537.jpg +24427.jpg +28934.jpg +9198.jpg +8816.jpg +13353.jpg +6152.jpg +3356.jpg +29975.jpg +21209.jpg +27765.jpg +3294.jpg +28162.jpg +16551.jpg +14418.jpg +22987.jpg +25123.jpg +12758.jpg +12677.jpg +26313.jpg +1336.jpg +23215.jpg +21777.jpg +13986.jpg +5678.jpg +10852.jpg +4720.jpg +8395.jpg +23280.jpg +13115.jpg +17630.jpg +10312.jpg +7270.jpg +20087.jpg +29790.jpg +18556.jpg +11070.jpg +9240.jpg +17949.jpg +7893.jpg +19729.jpg +21757.jpg +25784.jpg +25870.jpg +11904.jpg +12816.jpg +25134.jpg +13316.jpg +4016.jpg +7762.jpg +2522.jpg +3169.jpg +5620.jpg +20293.jpg +2945.jpg +10208.jpg +10996.jpg +1664.jpg +5305.jpg +19709.jpg +29761.jpg 
+17269.jpg +28105.jpg +3457.jpg +6488.jpg +13120.jpg +5468.jpg +15952.jpg +23995.jpg +317.jpg +3083.jpg +24674.jpg +5937.jpg +1500.jpg +8086.jpg +1703.jpg +6780.jpg +16059.jpg +23452.jpg +17712.jpg +22673.jpg +26535.jpg +12684.jpg +12603.jpg +10948.jpg +29658.jpg +11001.jpg +13007.jpg +6560.jpg +8054.jpg +14237.jpg +19200.jpg +14673.jpg +15772.jpg +22720.jpg +1151.jpg +12026.jpg +21068.jpg +6229.jpg +9832.jpg +9720.jpg +19513.jpg +28945.jpg +9026.jpg +20011.jpg +21406.jpg +21570.jpg +11765.jpg +27646.jpg +1699.jpg +2225.jpg +5995.jpg +14197.jpg +7592.jpg +27604.jpg +14794.jpg +2145.jpg +8010.jpg +29811.jpg +10983.jpg +24144.jpg +25612.jpg +9254.jpg +15946.jpg +19092.jpg +29610.jpg +2379.jpg +22592.jpg +16940.jpg +1900.jpg +24528.jpg +13377.jpg +16697.jpg +5817.jpg +2415.jpg +13257.jpg +3985.jpg +13776.jpg +21573.jpg +4709.jpg +3748.jpg +9314.jpg +25599.jpg +29217.jpg +2473.jpg +2868.jpg +8140.jpg +17326.jpg +8267.jpg +20342.jpg +17212.jpg +10739.jpg +24883.jpg +27875.jpg +2252.jpg +20628.jpg +24535.jpg +9847.jpg +24504.jpg +29092.jpg +14893.jpg +7233.jpg +23424.jpg +24963.jpg +5260.jpg +9861.jpg +22240.jpg +25860.jpg +3798.jpg +2659.jpg +6516.jpg +224.jpg +6170.jpg +2102.jpg +20421.jpg +23299.jpg +10994.jpg +27660.jpg +2136.jpg +23633.jpg +16015.jpg +16689.jpg +9992.jpg +10367.jpg +26989.jpg +3969.jpg +12654.jpg +12936.jpg +12638.jpg +27523.jpg +10939.jpg +11968.jpg +1219.jpg +8077.jpg +7229.jpg +11733.jpg +24654.jpg +22117.jpg +22963.jpg +12645.jpg +11946.jpg +11494.jpg +10053.jpg +18694.jpg +3188.jpg +2819.jpg +17257.jpg +17601.jpg +14348.jpg +13191.jpg +14003.jpg +12.jpg +26258.jpg +8332.jpg +12150.jpg +13234.jpg +9783.jpg +18733.jpg +15185.jpg +9579.jpg +22675.jpg +9721.jpg +13654.jpg +987.jpg +6587.jpg +4306.jpg +7304.jpg +2501.jpg +24244.jpg +14679.jpg +14978.jpg +6101.jpg +28673.jpg +20690.jpg +22449.jpg +848.jpg +6606.jpg +3214.jpg +23201.jpg +12325.jpg +26383.jpg +22092.jpg +8762.jpg +21136.jpg +22334.jpg +9081.jpg +2729.jpg +24332.jpg +1584.jpg +12644.jpg +20371.jpg +4805.jpg +15766.jpg +19297.jpg +13416.jpg +1492.jpg +17293.jpg +25256.jpg +13189.jpg +1063.jpg +29592.jpg +3940.jpg +16968.jpg +9504.jpg +5410.jpg +2373.jpg +18350.jpg +235.jpg +20681.jpg +14690.jpg +5071.jpg +12465.jpg +14768.jpg +20530.jpg +20499.jpg +29274.jpg +18848.jpg +11507.jpg +12306.jpg +16883.jpg +9888.jpg +3575.jpg +4433.jpg +23047.jpg +17551.jpg +28315.jpg +26552.jpg +9277.jpg +14512.jpg +26947.jpg +21230.jpg +11013.jpg +25593.jpg +16671.jpg +17150.jpg +1621.jpg +10065.jpg +14114.jpg +2787.jpg +17682.jpg +2012.jpg +22126.jpg +401.jpg +23797.jpg +1028.jpg +20353.jpg +3864.jpg +720.jpg +20339.jpg +29377.jpg +5672.jpg +10850.jpg +7844.jpg +20756.jpg +9483.jpg +13880.jpg +27906.jpg +9916.jpg +4145.jpg +21768.jpg +6099.jpg +16836.jpg +16701.jpg +18509.jpg +27638.jpg +28969.jpg +21459.jpg +3007.jpg +24607.jpg +16711.jpg +4879.jpg +13650.jpg +15205.jpg +16057.jpg +24463.jpg +9461.jpg +26781.jpg +6019.jpg +18448.jpg +15280.jpg +29470.jpg +21010.jpg +10277.jpg +2059.jpg +7012.jpg +4.jpg +20331.jpg +3945.jpg +562.jpg +3657.jpg +20069.jpg +20210.jpg +17957.jpg +139.jpg +6704.jpg +20014.jpg +24301.jpg +6202.jpg +12822.jpg +4634.jpg +28220.jpg +1084.jpg +11930.jpg +21346.jpg +25486.jpg +510.jpg +7400.jpg +1581.jpg +26396.jpg +18965.jpg +23909.jpg +16730.jpg +26220.jpg +1186.jpg +3026.jpg +22722.jpg +13279.jpg +7869.jpg +985.jpg +21399.jpg +22378.jpg +11688.jpg +20211.jpg +24496.jpg +19240.jpg +11695.jpg +27912.jpg +17845.jpg +13374.jpg +9165.jpg +8658.jpg +17661.jpg +18177.jpg +20651.jpg +1211.jpg +28569.jpg 
+28051.jpg +9055.jpg +4737.jpg +12681.jpg +3503.jpg +24123.jpg +25822.jpg +25280.jpg +5153.jpg +13417.jpg +22816.jpg +16653.jpg +19467.jpg +10150.jpg +26110.jpg +15244.jpg +9634.jpg +12018.jpg +8186.jpg +24854.jpg +23133.jpg +1943.jpg +379.jpg +16896.jpg +20454.jpg +4560.jpg +18819.jpg +22373.jpg +28564.jpg +15044.jpg +2174.jpg +24815.jpg +1623.jpg +23163.jpg +12606.jpg +3276.jpg +27833.jpg +28130.jpg +15212.jpg +18925.jpg +26921.jpg +3766.jpg +22968.jpg +3249.jpg +15692.jpg +8151.jpg +1907.jpg +26282.jpg +18911.jpg +23526.jpg +4781.jpg +13376.jpg +27575.jpg +415.jpg +1274.jpg +9883.jpg +2575.jpg +21277.jpg +27125.jpg +16345.jpg +15922.jpg +27098.jpg +396.jpg +15493.jpg +29596.jpg +27803.jpg +9144.jpg +10245.jpg +9789.jpg +1090.jpg +29422.jpg +29002.jpg +21610.jpg +27503.jpg +12333.jpg +23398.jpg +9997.jpg +25898.jpg +28070.jpg +2077.jpg +16875.jpg +6495.jpg +7524.jpg +21159.jpg +10334.jpg +28763.jpg +7672.jpg +26950.jpg +4062.jpg +16633.jpg +2530.jpg +8202.jpg +6035.jpg +8831.jpg +9019.jpg +2126.jpg +10805.jpg +18787.jpg +3371.jpg +13919.jpg +11536.jpg +17975.jpg +14968.jpg +9648.jpg +16627.jpg +9772.jpg +14921.jpg +14219.jpg +6935.jpg +13262.jpg +22677.jpg +10223.jpg +4324.jpg +14538.jpg +8154.jpg +8807.jpg +13474.jpg +13260.jpg +361.jpg +13755.jpg +22311.jpg +1995.jpg +16381.jpg +11716.jpg +19798.jpg +11558.jpg +26830.jpg +10181.jpg +23946.jpg +17587.jpg +16736.jpg +8673.jpg +5911.jpg +12537.jpg +26400.jpg +12637.jpg +23850.jpg +11742.jpg +13569.jpg +25602.jpg +1830.jpg +6304.jpg +932.jpg +25777.jpg +13278.jpg +808.jpg +29459.jpg +24842.jpg +2158.jpg +28269.jpg +10129.jpg +1720.jpg +17169.jpg +8226.jpg +8370.jpg +8970.jpg +9224.jpg +15074.jpg +4211.jpg +12566.jpg +15200.jpg +2875.jpg +17321.jpg +11984.jpg +4535.jpg +27329.jpg +6068.jpg +21058.jpg +2652.jpg +12568.jpg +12404.jpg +12826.jpg +10497.jpg +4315.jpg +25631.jpg +22602.jpg +23929.jpg +3527.jpg +8584.jpg +9530.jpg +23157.jpg +11133.jpg +26312.jpg +29877.jpg +20771.jpg +28034.jpg +7277.jpg +8048.jpg +18867.jpg +18720.jpg +2797.jpg +29111.jpg +25009.jpg +25269.jpg +19585.jpg +26157.jpg +9350.jpg +14604.jpg +28686.jpg +2067.jpg +18979.jpg +23138.jpg +29106.jpg +15034.jpg +15497.jpg +5358.jpg +11755.jpg +15305.jpg +10551.jpg +15370.jpg +1869.jpg +3858.jpg +15149.jpg +1797.jpg +3064.jpg +3680.jpg +26254.jpg +8958.jpg +9064.jpg +5476.jpg +8772.jpg +8303.jpg +10342.jpg +1812.jpg +27105.jpg +12858.jpg +1682.jpg +26146.jpg +12902.jpg +13638.jpg +2655.jpg +1416.jpg +17754.jpg +11668.jpg +7798.jpg +26942.jpg +1201.jpg +6776.jpg +20270.jpg +2889.jpg +19560.jpg +24866.jpg +19069.jpg +21465.jpg +6688.jpg +9513.jpg +19389.jpg +560.jpg +12608.jpg +19439.jpg +19810.jpg +2179.jpg +16984.jpg +28429.jpg +29608.jpg +22480.jpg +13935.jpg +14898.jpg +5504.jpg +7202.jpg +5705.jpg +2903.jpg +5536.jpg +10186.jpg +29577.jpg +25903.jpg +7934.jpg +6409.jpg +26914.jpg +26721.jpg +19089.jpg +18591.jpg +21369.jpg +23931.jpg +28002.jpg +26708.jpg +17593.jpg +2518.jpg +9028.jpg +25310.jpg +19118.jpg +29859.jpg +17641.jpg +822.jpg +18197.jpg +20989.jpg +7631.jpg +5663.jpg +15799.jpg +21261.jpg +24971.jpg +23106.jpg +13538.jpg +21796.jpg +17794.jpg +18890.jpg +13222.jpg +5805.jpg +13467.jpg +21190.jpg +2764.jpg +8834.jpg +22215.jpg +27266.jpg +10473.jpg +24973.jpg +29981.jpg +4293.jpg +25900.jpg +15745.jpg +10142.jpg +18756.jpg +18825.jpg +5295.jpg +19804.jpg +25701.jpg +17879.jpg +19620.jpg +19700.jpg +23005.jpg +29169.jpg +27716.jpg +20899.jpg +28681.jpg +20887.jpg +2213.jpg +4425.jpg +27727.jpg +14656.jpg +24880.jpg +6678.jpg +11972.jpg +22356.jpg +28142.jpg 
+22642.jpg +16581.jpg +27078.jpg +28712.jpg +22790.jpg +1654.jpg +6398.jpg +19933.jpg +9587.jpg +10572.jpg +12844.jpg +26354.jpg +10379.jpg +29989.jpg +3136.jpg +21589.jpg +2078.jpg +15191.jpg +9207.jpg +27586.jpg +14314.jpg +16016.jpg +12032.jpg +2828.jpg +21910.jpg +10500.jpg +2523.jpg +692.jpg +29464.jpg +21520.jpg +12188.jpg +14356.jpg +26304.jpg +21211.jpg +5365.jpg +6351.jpg +748.jpg +29224.jpg +23004.jpg +28496.jpg +19155.jpg +24976.jpg +26761.jpg +28940.jpg +22685.jpg +28410.jpg +28190.jpg +5906.jpg +28336.jpg +18166.jpg +913.jpg +14211.jpg +15093.jpg +27336.jpg +26322.jpg +12328.jpg +13339.jpg +8840.jpg +17722.jpg +16641.jpg +10046.jpg +760.jpg +194.jpg +21435.jpg +18017.jpg +27872.jpg +15467.jpg +12220.jpg +9824.jpg +15095.jpg +10588.jpg +2483.jpg +12016.jpg +22173.jpg +23535.jpg +9338.jpg +23636.jpg +29622.jpg +9882.jpg +27903.jpg +3245.jpg +14887.jpg +1440.jpg +1642.jpg +4042.jpg +7390.jpg +11029.jpg +21988.jpg +17357.jpg +28281.jpg +2240.jpg +23667.jpg +9734.jpg +25010.jpg +1697.jpg +23811.jpg +18724.jpg +6286.jpg +9166.jpg +26001.jpg +20462.jpg +15519.jpg +24608.jpg +1756.jpg +22090.jpg +7068.jpg +11446.jpg +2992.jpg +15296.jpg +18503.jpg +2801.jpg +5858.jpg +23993.jpg +18430.jpg +19331.jpg +26169.jpg +17736.jpg +4614.jpg +15052.jpg +19994.jpg +23586.jpg +16859.jpg +911.jpg +5800.jpg +5234.jpg +13382.jpg +26363.jpg +23141.jpg +11580.jpg +24314.jpg +3795.jpg +22444.jpg +183.jpg +27582.jpg +4808.jpg +11736.jpg +14667.jpg +27911.jpg +7209.jpg +2931.jpg +21362.jpg +22879.jpg +8725.jpg +28722.jpg +28844.jpg +4883.jpg +12534.jpg +20360.jpg +3944.jpg +3052.jpg +18440.jpg +23606.jpg +19151.jpg +23107.jpg +239.jpg +20294.jpg +4722.jpg +1267.jpg +28870.jpg +21196.jpg +20182.jpg +4576.jpg +28246.jpg +6718.jpg +8360.jpg +29602.jpg +10255.jpg +29449.jpg +13665.jpg +2049.jpg +15138.jpg +14234.jpg +29563.jpg +13646.jpg +5791.jpg +29896.jpg +20366.jpg +22998.jpg +153.jpg +10380.jpg +8950.jpg +5775.jpg +22460.jpg +9890.jpg +13379.jpg +29284.jpg +11330.jpg +17384.jpg +10066.jpg +4644.jpg +26973.jpg +3185.jpg +3181.jpg +4296.jpg +8311.jpg +21464.jpg +23436.jpg +15614.jpg +11600.jpg +9817.jpg +4896.jpg +27435.jpg +3166.jpg +8945.jpg +21268.jpg +10837.jpg +2051.jpg +12863.jpg +12209.jpg +19397.jpg +10036.jpg +11488.jpg +24408.jpg +27599.jpg +14465.jpg +13868.jpg +9390.jpg +26593.jpg +23471.jpg +18982.jpg +12157.jpg +14902.jpg +7337.jpg +25578.jpg +17842.jpg +28491.jpg +18412.jpg +8262.jpg +10562.jpg +11683.jpg +13092.jpg +12650.jpg +27294.jpg +27876.jpg +13295.jpg +23860.jpg +19882.jpg +4992.jpg +14210.jpg +19889.jpg +14641.jpg +17812.jpg +24019.jpg +16964.jpg +23119.jpg +29313.jpg +29669.jpg +25595.jpg +16274.jpg +27071.jpg +14188.jpg +3142.jpg +4427.jpg +27763.jpg +26726.jpg +1831.jpg +23940.jpg +23869.jpg +1341.jpg +25965.jpg +29073.jpg +21170.jpg +8206.jpg +18299.jpg +22073.jpg +9771.jpg +5938.jpg +23180.jpg +9807.jpg +22553.jpg +24517.jpg +3934.jpg +1016.jpg +1853.jpg +21602.jpg +21305.jpg +26030.jpg +11785.jpg +14301.jpg +28603.jpg +20766.jpg +26122.jpg +22272.jpg +29398.jpg +19680.jpg +29702.jpg +3008.jpg +7848.jpg +18285.jpg +24759.jpg +24778.jpg +2644.jpg +355.jpg +24280.jpg +24313.jpg +17322.jpg +13082.jpg +3107.jpg +17367.jpg +23958.jpg +28241.jpg +29667.jpg +13596.jpg +15762.jpg +13858.jpg +5272.jpg +3887.jpg +3512.jpg +4685.jpg +609.jpg +17880.jpg +27643.jpg +41.jpg +10540.jpg +13059.jpg +28946.jpg +25674.jpg +11905.jpg +11970.jpg +14588.jpg +8426.jpg +16490.jpg +23509.jpg +11925.jpg +29578.jpg +6228.jpg +2579.jpg +17823.jpg +24849.jpg +2529.jpg +19293.jpg +27546.jpg +21804.jpg 
+28983.jpg +6093.jpg +4581.jpg +9253.jpg +11496.jpg +11624.jpg +26121.jpg +1992.jpg +25795.jpg +13623.jpg +14580.jpg +23439.jpg +28827.jpg +2206.jpg +26772.jpg +9247.jpg +19416.jpg +25199.jpg +28573.jpg +12079.jpg +711.jpg +11380.jpg +13319.jpg +16956.jpg +9113.jpg +27208.jpg +4351.jpg +16180.jpg +19847.jpg +16990.jpg +27725.jpg +23130.jpg +16942.jpg +26235.jpg +13519.jpg +7584.jpg +14602.jpg +25517.jpg +15859.jpg +293.jpg +4159.jpg +15928.jpg +4899.jpg +23144.jpg +11239.jpg +17251.jpg +94.jpg +16140.jpg +2620.jpg +15434.jpg +26311.jpg +27693.jpg +15803.jpg +29822.jpg +6227.jpg +22187.jpg +8437.jpg +8635.jpg +8570.jpg +4279.jpg +1319.jpg +20369.jpg +17379.jpg +4423.jpg +6844.jpg +20460.jpg +20547.jpg +13699.jpg +20502.jpg +16870.jpg +5952.jpg +6250.jpg +8682.jpg +27513.jpg +7915.jpg +15116.jpg +12080.jpg +21791.jpg +24956.jpg +10323.jpg +10260.jpg +14024.jpg +17077.jpg +20287.jpg +27535.jpg +3743.jpg +8629.jpg +3995.jpg +27145.jpg +19586.jpg +5838.jpg +11159.jpg +23270.jpg +17898.jpg +28927.jpg +6834.jpg +27526.jpg +14434.jpg +11541.jpg +24961.jpg +10042.jpg +3517.jpg +12304.jpg +9212.jpg +28486.jpg +1469.jpg +7519.jpg +2159.jpg +10657.jpg +3368.jpg +1774.jpg +4893.jpg +10022.jpg +9512.jpg +14722.jpg +13413.jpg +16135.jpg +14026.jpg +18123.jpg +641.jpg +1407.jpg +14718.jpg +3936.jpg +521.jpg +19844.jpg +19499.jpg +21756.jpg +15124.jpg +16692.jpg +29423.jpg +14965.jpg +7428.jpg +14963.jpg +3313.jpg +2861.jpg +13274.jpg +1075.jpg +525.jpg +3620.jpg +19896.jpg +13961.jpg +24906.jpg +25327.jpg +9345.jpg +3692.jpg +18159.jpg +15231.jpg +4098.jpg +9651.jpg +15145.jpg +13067.jpg +1450.jpg +13779.jpg +25728.jpg +28825.jpg +6846.jpg +13328.jpg +27328.jpg +27367.jpg +6370.jpg +11260.jpg +21458.jpg +12191.jpg +17501.jpg +307.jpg +9945.jpg +22195.jpg +19565.jpg +7314.jpg +2849.jpg +14189.jpg +14598.jpg +11114.jpg +22884.jpg +12867.jpg +19292.jpg +2060.jpg +7077.jpg +15110.jpg +14664.jpg +16608.jpg +9681.jpg +13619.jpg +13547.jpg +10758.jpg +23288.jpg +29079.jpg +23019.jpg +20823.jpg +7110.jpg +4676.jpg +4799.jpg +19486.jpg +25541.jpg +21925.jpg +23826.jpg +13872.jpg +847.jpg +22031.jpg +475.jpg +28473.jpg +16976.jpg +9115.jpg +29949.jpg +26597.jpg +11381.jpg +8224.jpg +24486.jpg +11888.jpg +12248.jpg +8501.jpg +1487.jpg +23972.jpg +16272.jpg +3634.jpg +26008.jpg +3373.jpg +24024.jpg +5146.jpg +7056.jpg +9426.jpg +8608.jpg +18871.jpg +16181.jpg +7387.jpg +11414.jpg +28135.jpg +2851.jpg +17912.jpg +21507.jpg +15431.jpg +17830.jpg +23990.jpg +3255.jpg +21944.jpg +15282.jpg +2142.jpg +24566.jpg +17460.jpg +3283.jpg +12647.jpg +21628.jpg +5124.jpg +4210.jpg +20677.jpg +24829.jpg +23347.jpg +10746.jpg +27738.jpg +9946.jpg +18765.jpg +7925.jpg +27173.jpg +22226.jpg +16767.jpg +16106.jpg +20108.jpg +13183.jpg +1717.jpg +23379.jpg +1678.jpg +1911.jpg +18618.jpg +25626.jpg +5274.jpg +27126.jpg +22302.jpg +23755.jpg +9800.jpg +8064.jpg +18924.jpg +86.jpg +10179.jpg +14982.jpg +5936.jpg +2417.jpg +11505.jpg +21551.jpg +7371.jpg +20409.jpg +27180.jpg +12962.jpg +175.jpg +1892.jpg +17204.jpg +15767.jpg +12921.jpg +29089.jpg +14028.jpg +15935.jpg +8327.jpg +3789.jpg +3551.jpg +5965.jpg +27368.jpg +26092.jpg +29902.jpg +4880.jpg +23153.jpg +21335.jpg +3624.jpg +6894.jpg +14873.jpg +21378.jpg +17118.jpg +13700.jpg +19111.jpg +21625.jpg +7385.jpg +29508.jpg +27438.jpg +18731.jpg +11266.jpg +27904.jpg +19442.jpg +23375.jpg +29269.jpg +28294.jpg +5647.jpg +20349.jpg +4711.jpg +10064.jpg +11152.jpg +19390.jpg +5575.jpg +15708.jpg +28227.jpg +27579.jpg +27091.jpg +26948.jpg +6048.jpg +28963.jpg +23246.jpg +23800.jpg 
+14271.jpg +24116.jpg +10749.jpg +28076.jpg +22179.jpg +2592.jpg +19028.jpg +4054.jpg +14502.jpg +412.jpg +789.jpg +8203.jpg +11164.jpg +8448.jpg +23232.jpg +26668.jpg +19023.jpg +16732.jpg +17188.jpg +3468.jpg +16660.jpg +21614.jpg +12061.jpg +22247.jpg +5017.jpg +9751.jpg +25084.jpg +6236.jpg +23549.jpg +11964.jpg +15582.jpg +6008.jpg +679.jpg +23919.jpg +21971.jpg +15941.jpg +17075.jpg +1150.jpg +3784.jpg +19842.jpg +16165.jpg +13218.jpg +10804.jpg +6640.jpg +24348.jpg +16640.jpg +11378.jpg +19788.jpg +6242.jpg +5100.jpg +27622.jpg +21174.jpg +13516.jpg +6373.jpg +17555.jpg +20764.jpg +22922.jpg +27224.jpg +23438.jpg +20456.jpg +20434.jpg +17407.jpg +23847.jpg +25978.jpg +18530.jpg +2197.jpg +24436.jpg +20025.jpg +23639.jpg +26390.jpg +27887.jpg +12907.jpg +24652.jpg +5119.jpg +8940.jpg +5582.jpg +23939.jpg +21862.jpg +29562.jpg +9137.jpg +757.jpg +4232.jpg +3128.jpg +28537.jpg +2091.jpg +21683.jpg +17893.jpg +24453.jpg +11489.jpg +28273.jpg +9346.jpg +9722.jpg +1144.jpg +19923.jpg +2760.jpg +25477.jpg +477.jpg +7003.jpg +8204.jpg +13397.jpg +29375.jpg +5450.jpg +288.jpg +11903.jpg +19792.jpg +6742.jpg +17135.jpg +3584.jpg +3492.jpg +29281.jpg +15813.jpg +13338.jpg +24738.jpg +15710.jpg +26592.jpg +114.jpg +17761.jpg +13060.jpg +8888.jpg +18830.jpg +23714.jpg +13175.jpg +4262.jpg +4657.jpg +6383.jpg +22192.jpg +16315.jpg +13292.jpg +19784.jpg +14138.jpg +3103.jpg +25291.jpg +10409.jpg +23326.jpg +8588.jpg +6855.jpg +11455.jpg +23882.jpg +29821.jpg +21861.jpg +15740.jpg +18967.jpg +10016.jpg +22504.jpg +1615.jpg +13209.jpg +2.jpg +10537.jpg +2180.jpg +18119.jpg +16161.jpg +12642.jpg +1321.jpg +13675.jpg +12438.jpg +24531.jpg +15624.jpg +28792.jpg +22103.jpg +101.jpg +14853.jpg +2753.jpg +25040.jpg +20862.jpg +9218.jpg +13381.jpg +6698.jpg +21192.jpg +29584.jpg +1289.jpg +1601.jpg +12666.jpg +15010.jpg +16118.jpg +11402.jpg +3587.jpg +538.jpg +6146.jpg +3908.jpg +17397.jpg +14228.jpg +7595.jpg +22620.jpg +15769.jpg +9602.jpg +20638.jpg +5690.jpg +20987.jpg +17209.jpg +28462.jpg +11710.jpg +8095.jpg +29851.jpg +8449.jpg +7949.jpg +29721.jpg +2119.jpg +8526.jpg +19362.jpg +6875.jpg +11779.jpg +24945.jpg +7113.jpg +5922.jpg +17481.jpg +25475.jpg +933.jpg +11759.jpg +9889.jpg +9758.jpg +24018.jpg +1694.jpg +3504.jpg +15360.jpg +25968.jpg +28288.jpg +1814.jpg +21065.jpg +312.jpg +19799.jpg +23287.jpg +12783.jpg +11443.jpg +9105.jpg +25039.jpg +7032.jpg +10210.jpg +11510.jpg +16034.jpg +13891.jpg +2273.jpg +16816.jpg +27118.jpg +12185.jpg +7407.jpg +18887.jpg +5288.jpg +15924.jpg +11703.jpg +5336.jpg +28861.jpg +23301.jpg +18413.jpg +710.jpg +696.jpg +13622.jpg +27153.jpg +9869.jpg +8583.jpg +21666.jpg +1809.jpg +21780.jpg +27710.jpg +18057.jpg +29003.jpg +29223.jpg +6785.jpg +8671.jpg +26430.jpg +22953.jpg +5221.jpg +15658.jpg +13412.jpg +26573.jpg +21541.jpg +3850.jpg +22248.jpg +17701.jpg +22052.jpg +725.jpg +10457.jpg +17316.jpg +20617.jpg +2215.jpg +14224.jpg +12396.jpg +1112.jpg +26113.jpg +17006.jpg +18945.jpg +19268.jpg +7670.jpg +11088.jpg +26768.jpg +6970.jpg +9714.jpg +9647.jpg +22320.jpg +26654.jpg +17347.jpg +23205.jpg +25908.jpg +17061.jpg +19787.jpg +7836.jpg +6311.jpg +6873.jpg +2245.jpg +11009.jpg +25856.jpg +23518.jpg +17025.jpg +5877.jpg +21540.jpg +15907.jpg +19981.jpg +3043.jpg +12634.jpg +17537.jpg +14410.jpg +24589.jpg +21699.jpg +21278.jpg +24900.jpg +20985.jpg +23920.jpg +1906.jpg +3223.jpg +13217.jpg +24495.jpg +14324.jpg +26152.jpg +9216.jpg +8392.jpg +26557.jpg +9077.jpg +11053.jpg +13992.jpg +19071.jpg +16797.jpg +4484.jpg +10214.jpg +23313.jpg +14140.jpg +2569.jpg 
+29875.jpg +2774.jpg +27478.jpg +9092.jpg +19458.jpg +13031.jpg +22251.jpg +15133.jpg +4316.jpg +15233.jpg +9025.jpg +27558.jpg +18292.jpg +14157.jpg +13871.jpg +10642.jpg +5608.jpg +16200.jpg +17291.jpg +14070.jpg +6553.jpg +15119.jpg +4275.jpg +21436.jpg +29268.jpg +26405.jpg +29980.jpg +19269.jpg +15389.jpg +11971.jpg +7758.jpg +11929.jpg +3122.jpg +9811.jpg +2509.jpg +15654.jpg +2096.jpg +15005.jpg +14842.jpg +19477.jpg +26546.jpg +12353.jpg +24399.jpg +9989.jpg +9987.jpg +4378.jpg +18706.jpg +170.jpg +1688.jpg +18789.jpg +1491.jpg +29659.jpg +14595.jpg +5296.jpg +23571.jpg +19158.jpg +6600.jpg +15033.jpg +5496.jpg +15844.jpg +10781.jpg +17572.jpg +10788.jpg +844.jpg +18482.jpg +12146.jpg +19169.jpg +8895.jpg +29322.jpg +16814.jpg +15190.jpg +12323.jpg +28340.jpg +4999.jpg +8994.jpg +17533.jpg +8408.jpg +11328.jpg +7159.jpg +14772.jpg +8401.jpg +15436.jpg +23829.jpg +26500.jpg +12853.jpg +29272.jpg +12983.jpg +17034.jpg +26892.jpg +5371.jpg +5323.jpg +10513.jpg +5703.jpg +6325.jpg +20938.jpg +10913.jpg +25079.jpg +21444.jpg +19503.jpg +27426.jpg +21098.jpg +16267.jpg +29097.jpg +10438.jpg +22216.jpg +29867.jpg +13824.jpg +12947.jpg +19867.jpg +23798.jpg +27839.jpg +10578.jpg +7995.jpg +4933.jpg +7100.jpg +11520.jpg +11322.jpg +19128.jpg +11619.jpg +9272.jpg +4564.jpg +4620.jpg +4234.jpg +3661.jpg +14158.jpg +22441.jpg +23501.jpg +25382.jpg +17651.jpg +17164.jpg +14539.jpg +14397.jpg +27460.jpg +11708.jpg +1000.jpg +27324.jpg +22340.jpg +21983.jpg +17676.jpg +19785.jpg +29607.jpg +4488.jpg +4304.jpg +19650.jpg +11021.jpg +6701.jpg +11726.jpg +23601.jpg +28550.jpg +24491.jpg +29678.jpg +24317.jpg +10770.jpg +6624.jpg +27096.jpg +28806.jpg +2154.jpg +20644.jpg +387.jpg +11112.jpg +19893.jpg +15572.jpg +11066.jpg +3328.jpg +5887.jpg +28531.jpg +14105.jpg +17234.jpg +19125.jpg +24072.jpg +21926.jpg +15757.jpg +28768.jpg +13708.jpg +21854.jpg +19881.jpg +15087.jpg +11213.jpg +1580.jpg +12887.jpg +20945.jpg +25163.jpg +23320.jpg +15636.jpg +18268.jpg +25658.jpg +5871.jpg +21484.jpg +1886.jpg +1820.jpg +1794.jpg +26249.jpg +13632.jpg +25615.jpg +27825.jpg +14056.jpg +25322.jpg +29760.jpg +9334.jpg +12686.jpg +18485.jpg +6609.jpg +4793.jpg +10162.jpg +28068.jpg +13595.jpg +16438.jpg +11891.jpg +18304.jpg +26118.jpg +8306.jpg +11272.jpg +11623.jpg +11770.jpg +29626.jpg +26838.jpg +25292.jpg +29498.jpg +9706.jpg +14059.jpg +5434.jpg +7694.jpg +18826.jpg +3981.jpg +12778.jpg +5650.jpg +27756.jpg +24629.jpg +24391.jpg +20062.jpg +6043.jpg +10839.jpg +23510.jpg +28701.jpg +6054.jpg +19537.jpg +11570.jpg +12943.jpg +2315.jpg +6823.jpg +773.jpg +1807.jpg +29967.jpg +18437.jpg +16786.jpg +2886.jpg +15216.jpg +8482.jpg +28149.jpg +5754.jpg +26823.jpg +13705.jpg +21232.jpg +20438.jpg +17564.jpg +26433.jpg +20301.jpg +13533.jpg +18269.jpg +10141.jpg +26026.jpg +7693.jpg +3285.jpg +7910.jpg +26926.jpg +25930.jpg +6554.jpg +8348.jpg +11368.jpg +11371.jpg +21938.jpg +18126.jpg +11411.jpg +5105.jpg +1404.jpg +17411.jpg +2486.jpg +26129.jpg +7938.jpg +6946.jpg +7458.jpg +11618.jpg +28295.jpg +22450.jpg +15934.jpg +20406.jpg +8982.jpg +6709.jpg +1587.jpg +16459.jpg +27677.jpg +3656.jpg +1125.jpg +11255.jpg +16516.jpg +17090.jpg +11805.jpg +2613.jpg +24040.jpg +24278.jpg +28833.jpg +14129.jpg +10088.jpg +17236.jpg +29609.jpg +5793.jpg +28718.jpg +17816.jpg +14609.jpg +29564.jpg +12701.jpg +11800.jpg +28390.jpg +26541.jpg +27470.jpg +20378.jpg +24157.jpg +19874.jpg +19520.jpg +13505.jpg +1153.jpg +11447.jpg +6611.jpg +24889.jpg +9250.jpg +29411.jpg +9206.jpg +10810.jpg +14567.jpg +15681.jpg +12793.jpg +14124.jpg 
+5904.jpg +5595.jpg +29226.jpg +9780.jpg +6052.jpg +23996.jpg +2763.jpg +11136.jpg +3510.jpg +1569.jpg +27225.jpg +15777.jpg +11572.jpg +750.jpg +13648.jpg +5596.jpg +19222.jpg +22687.jpg +11796.jpg +10440.jpg +29344.jpg +2660.jpg +22995.jpg +3509.jpg +25368.jpg +23455.jpg +8622.jpg +843.jpg +8973.jpg +23598.jpg +4325.jpg +8238.jpg +8931.jpg +20466.jpg +5389.jpg +5970.jpg +28240.jpg +21682.jpg +23174.jpg +3247.jpg +15586.jpg +25069.jpg +5962.jpg +28291.jpg +16033.jpg +352.jpg +7237.jpg +1687.jpg +27657.jpg +13452.jpg +4725.jpg +12888.jpg +6727.jpg +15652.jpg +14152.jpg +20616.jpg +214.jpg +14901.jpg +15285.jpg +21183.jpg +1355.jpg +14638.jpg +29767.jpg +27399.jpg +5087.jpg +2740.jpg +24946.jpg +459.jpg +26461.jpg +11172.jpg +28492.jpg +5619.jpg +11015.jpg +10674.jpg +1976.jpg +28459.jpg +1599.jpg +5868.jpg +13507.jpg +21067.jpg +15978.jpg +10420.jpg +18984.jpg +4543.jpg +25260.jpg +17351.jpg +29034.jpg +23776.jpg +16410.jpg +11291.jpg +5537.jpg +27858.jpg +22181.jpg +3977.jpg +6746.jpg +10696.jpg +26791.jpg +14518.jpg +28339.jpg +4987.jpg +23400.jpg +16065.jpg +15785.jpg +22514.jpg +5863.jpg +716.jpg +23194.jpg +22393.jpg +18762.jpg +13612.jpg +2996.jpg +17580.jpg +26067.jpg +24195.jpg +11866.jpg +20872.jpg +24879.jpg +3567.jpg +3968.jpg +7178.jpg +19929.jpg +7563.jpg +7058.jpg +18324.jpg +15506.jpg +17028.jpg +1637.jpg +15543.jpg +6811.jpg +29618.jpg +26659.jpg +18683.jpg +26427.jpg +20101.jpg +29099.jpg +13095.jpg +27410.jpg +20459.jpg +27934.jpg +19858.jpg +26397.jpg +29502.jpg +1027.jpg +11678.jpg +3321.jpg +15566.jpg +5884.jpg +26780.jpg +8350.jpg +15527.jpg +18441.jpg +29907.jpg +25794.jpg +21371.jpg +11292.jpg +27052.jpg +8257.jpg +7212.jpg +16565.jpg +16833.jpg +27628.jpg +14889.jpg +27665.jpg +15971.jpg +23316.jpg +20389.jpg +26769.jpg +16948.jpg +23154.jpg +10530.jpg +26783.jpg +17940.jpg +110.jpg +19326.jpg +25733.jpg +8422.jpg +3155.jpg +8560.jpg +10351.jpg +20348.jpg +16151.jpg +1920.jpg +11855.jpg +21710.jpg +11370.jpg +22704.jpg +22455.jpg +24753.jpg +18635.jpg +3702.jpg +15441.jpg +28753.jpg +2693.jpg +18130.jpg +7378.jpg +3134.jpg +3159.jpg +16999.jpg +26870.jpg +2423.jpg +16476.jpg +26620.jpg +9306.jpg +9050.jpg +13773.jpg +14524.jpg +18870.jpg +20474.jpg +4689.jpg +2032.jpg +2783.jpg +7567.jpg +14828.jpg +22469.jpg +14145.jpg +25739.jpg +316.jpg +4043.jpg +793.jpg +21778.jpg +23700.jpg +5814.jpg +24479.jpg +2048.jpg +14259.jpg +26320.jpg +13455.jpg +3379.jpg +15616.jpg +12378.jpg +22751.jpg +2475.jpg +21244.jpg +15156.jpg +11231.jpg +12930.jpg +11254.jpg +6719.jpg +22902.jpg +3102.jpg +27824.jpg +26117.jpg +14011.jpg +6010.jpg +18176.jpg +16645.jpg +21451.jpg +25352.jpg +4119.jpg +17.jpg +19493.jpg +4443.jpg +19261.jpg +7834.jpg +24846.jpg +2925.jpg +28210.jpg +8812.jpg +20936.jpg +5043.jpg +29948.jpg +20423.jpg +17417.jpg +24001.jpg +15238.jpg +28602.jpg +12836.jpg +28647.jpg +29979.jpg +19238.jpg +2629.jpg +11933.jpg +12805.jpg +12593.jpg +23696.jpg +7702.jpg +22739.jpg +27823.jpg +25195.jpg +16423.jpg +12266.jpg +20574.jpg +26015.jpg +8661.jpg +1636.jpg +557.jpg +881.jpg +4628.jpg +12641.jpg +4161.jpg +9048.jpg +16672.jpg +15352.jpg +8954.jpg +9063.jpg +22345.jpg +17329.jpg +22219.jpg +4538.jpg +1412.jpg +11813.jpg +4001.jpg +10200.jpg +25474.jpg +21518.jpg +2585.jpg +25459.jpg +2233.jpg +14422.jpg +29120.jpg +10821.jpg +19649.jpg +130.jpg +19582.jpg +24830.jpg +12899.jpg +22539.jpg +21466.jpg +23378.jpg +11068.jpg +9140.jpg +519.jpg +16361.jpg +9371.jpg +22349.jpg +9935.jpg +1508.jpg +14231.jpg +1916.jpg +5945.jpg +11168.jpg +4462.jpg +8857.jpg +7117.jpg +24816.jpg 
+19869.jpg +25592.jpg +4537.jpg +24635.jpg +17319.jpg +4089.jpg +25387.jpg +10165.jpg +1545.jpg +12460.jpg +8651.jpg +28514.jpg +22010.jpg +6797.jpg +23330.jpg +4740.jpg +16756.jpg +10011.jpg +10285.jpg +4160.jpg +23219.jpg +4565.jpg +12015.jpg +20946.jpg +29998.jpg +17069.jpg +19487.jpg +12464.jpg +29797.jpg +23584.jpg +20568.jpg +29172.jpg +11562.jpg +8439.jpg +3224.jpg +15109.jpg +29994.jpg +18891.jpg +11899.jpg +19317.jpg +2508.jpg +6574.jpg +4140.jpg +2922.jpg +6126.jpg +10782.jpg +9823.jpg +29048.jpg +8316.jpg +24509.jpg +382.jpg +11080.jpg +25119.jpg +22279.jpg +2181.jpg +27045.jpg +17642.jpg +8152.jpg +9551.jpg +28752.jpg +26575.jpg +648.jpg +12055.jpg +16652.jpg +16615.jpg +4367.jpg +2212.jpg +24308.jpg +3989.jpg +12885.jpg +22803.jpg +14010.jpg +8600.jpg +8769.jpg +6517.jpg +18507.jpg +10868.jpg +3209.jpg +21608.jpg +26710.jpg +27191.jpg +16707.jpg +5283.jpg +27545.jpg +14728.jpg +13846.jpg +5979.jpg +4816.jpg +19206.jpg +18541.jpg +26194.jpg +16263.jpg +18892.jpg +10356.jpg +24342.jpg +28798.jpg +5063.jpg +15498.jpg +2857.jpg +7021.jpg +9797.jpg +9014.jpg +25050.jpg +20180.jpg +15451.jpg +9017.jpg +23064.jpg +17406.jpg +9713.jpg +28498.jpg +19371.jpg +1131.jpg +24095.jpg +7731.jpg +17731.jpg +632.jpg +20364.jpg +837.jpg +14470.jpg +8219.jpg +15640.jpg +26140.jpg +2121.jpg +3898.jpg +2494.jpg +11082.jpg +26770.jpg +8628.jpg +8.jpg +17805.jpg +2971.jpg +22959.jpg +21175.jpg +28875.jpg +23618.jpg +10331.jpg +21526.jpg +2582.jpg +17387.jpg +3753.jpg +27466.jpg +16011.jpg +19032.jpg +3698.jpg +18256.jpg +19166.jpg +29305.jpg +7708.jpg +7324.jpg +23165.jpg +16186.jpg +11587.jpg +24413.jpg +27423.jpg +735.jpg +24514.jpg +28107.jpg +20978.jpg +13843.jpg +19640.jpg +28259.jpg +24374.jpg +20600.jpg +26291.jpg +7585.jpg +7577.jpg +27018.jpg +498.jpg +9142.jpg +5467.jpg +27487.jpg +6053.jpg +256.jpg +12697.jpg +23500.jpg +30.jpg +8379.jpg +8648.jpg +17729.jpg +2114.jpg +21131.jpg +14353.jpg +7061.jpg +7751.jpg +18405.jpg +360.jpg +16950.jpg +22188.jpg +11148.jpg +2982.jpg +9952.jpg +2062.jpg +19584.jpg +8349.jpg +14575.jpg +20911.jpg +4623.jpg +17318.jpg +29445.jpg +20001.jpg +24421.jpg +25635.jpg +29662.jpg +3614.jpg +3238.jpg +17757.jpg +25881.jpg +27200.jpg +24582.jpg +6004.jpg +13715.jpg +25081.jpg +804.jpg +24114.jpg +3845.jpg +1465.jpg +21372.jpg +15372.jpg +14021.jpg +4719.jpg +18640.jpg +1420.jpg +11369.jpg +25447.jpg +15351.jpg +28186.jpg +27990.jpg +21320.jpg +21951.jpg +20071.jpg +25288.jpg +7838.jpg +29264.jpg +3065.jpg +13299.jpg +27881.jpg +7793.jpg +4530.jpg +16301.jpg +7822.jpg +5130.jpg +16584.jpg +2437.jpg +13176.jpg +941.jpg +27382.jpg +4856.jpg +16334.jpg +23838.jpg +27926.jpg +28801.jpg +16363.jpg +13480.jpg +570.jpg +21973.jpg +10779.jpg +2254.jpg +24231.jpg +26551.jpg +2146.jpg +16650.jpg +22510.jpg +13075.jpg +72.jpg +20930.jpg +17943.jpg +13931.jpg +25369.jpg +22827.jpg +8073.jpg +8566.jpg +26000.jpg +26868.jpg +18009.jpg +26521.jpg +20545.jpg +6543.jpg +8330.jpg +1564.jpg +24227.jpg +19693.jpg +3277.jpg +26755.jpg +21316.jpg +24122.jpg +12651.jpg +12857.jpg +22352.jpg +10919.jpg +11781.jpg +9573.jpg +12365.jpg +15313.jpg +26806.jpg +16421.jpg +7658.jpg +3306.jpg +6943.jpg +12680.jpg +55.jpg +1862.jpg +25594.jpg +4595.jpg +20340.jpg +26033.jpg +20548.jpg +6158.jpg +29373.jpg +20557.jpg +14426.jpg +22625.jpg +18361.jpg +2779.jpg +5964.jpg +1895.jpg +22064.jpg +26229.jpg +16439.jpg +20437.jpg +23812.jpg +26790.jpg +22508.jpg +9635.jpg +10671.jpg +22359.jpg +18121.jpg +48.jpg +28319.jpg +8780.jpg +22463.jpg +25418.jpg +25174.jpg +16546.jpg +24817.jpg +8273.jpg 
+10008.jpg +631.jpg +16733.jpg +27230.jpg +136.jpg +21927.jpg +12738.jpg +1464.jpg +17308.jpg +10824.jpg +8166.jpg +8192.jpg +4100.jpg +25609.jpg +24336.jpg +6973.jpg +18616.jpg +17363.jpg +14919.jpg +1004.jpg +29161.jpg +8060.jpg +4088.jpg +28611.jpg +2461.jpg +22229.jpg +4481.jpg +6885.jpg +25441.jpg +5008.jpg +25264.jpg +21893.jpg +13306.jpg +16518.jpg +25608.jpg +18552.jpg +15420.jpg +23292.jpg +6731.jpg +372.jpg +26915.jpg +20840.jpg +25816.jpg +8968.jpg +25454.jpg +13402.jpg +28540.jpg +26314.jpg +26997.jpg +6282.jpg +22162.jpg +10704.jpg +2278.jpg +27783.jpg +22474.jpg +13009.jpg +12492.jpg +3964.jpg +20748.jpg +6475.jpg +10128.jpg +3759.jpg +22577.jpg +6735.jpg +29961.jpg +6082.jpg +25695.jpg +24710.jpg +3590.jpg +6565.jpg +2547.jpg +7303.jpg +6816.jpg +24222.jpg +13344.jpg +27152.jpg +19601.jpg +15402.jpg +4540.jpg +26547.jpg +22488.jpg +6545.jpg +20128.jpg +20333.jpg +15991.jpg +15152.jpg +4562.jpg +592.jpg +1834.jpg +23842.jpg +4926.jpg +19374.jpg +25682.jpg +12027.jpg +7677.jpg +26513.jpg +23965.jpg +3070.jpg +20652.jpg +6159.jpg +25808.jpg +2515.jpg +1269.jpg +10196.jpg +2943.jpg +1091.jpg +17394.jpg +18703.jpg +28865.jpg +12261.jpg +24843.jpg +5727.jpg +4827.jpg +19098.jpg +19079.jpg +321.jpg +28006.jpg +319.jpg +16089.jpg +5404.jpg +14857.jpg +11986.jpg +8452.jpg +2674.jpg +10274.jpg +27908.jpg +7322.jpg +13704.jpg +29558.jpg +8353.jpg +26754.jpg +18297.jpg +4489.jpg +25020.jpg +25934.jpg +20381.jpg +285.jpg +16481.jpg +1182.jpg +8416.jpg +14525.jpg +3339.jpg +25574.jpg +12799.jpg +11886.jpg +21663.jpg +20206.jpg +19506.jpg +18697.jpg +16527.jpg +12660.jpg +2348.jpg +15041.jpg +24312.jpg +16562.jpg +28874.jpg +17720.jpg +12529.jpg +5253.jpg +23610.jpg +4826.jpg +10445.jpg +10479.jpg +24362.jpg +23875.jpg +4183.jpg +29664.jpg +23376.jpg +22695.jpg +28479.jpg +16533.jpg +14811.jpg +18340.jpg +1963.jpg +10082.jpg +8557.jpg +6467.jpg +3903.jpg +22218.jpg +23590.jpg +14045.jpg +15590.jpg +10727.jpg +23752.jpg +4022.jpg +12450.jpg +9550.jpg +18122.jpg +13801.jpg +25598.jpg +16343.jpg +1299.jpg +9034.jpg +19234.jpg +8130.jpg +20299.jpg +27269.jpg +19085.jpg +417.jpg +23434.jpg +14986.jpg +9169.jpg +21556.jpg +8554.jpg +19106.jpg +27184.jpg +23117.jpg +18118.jpg +2140.jpg +322.jpg +10991.jpg +589.jpg +14475.jpg +4200.jpg +420.jpg +9.jpg +12264.jpg +20068.jpg +17056.jpg +2954.jpg +26509.jpg +873.jpg +16604.jpg +29529.jpg +3790.jpg +8951.jpg +12315.jpg +11535.jpg +15664.jpg +18095.jpg +12709.jpg +22290.jpg +7008.jpg +19314.jpg +15603.jpg +16696.jpg +26316.jpg +9514.jpg +26276.jpg +22427.jpg +7265.jpg +29486.jpg +7473.jpg +20782.jpg +2080.jpg +19600.jpg +29109.jpg +25187.jpg +4237.jpg +18763.jpg +28355.jpg +4259.jpg +21579.jpg +7764.jpg +27530.jpg +16142.jpg +2044.jpg +17127.jpg +25055.jpg +3778.jpg +13317.jpg +27278.jpg +9398.jpg +3385.jpg +12279.jpg +2601.jpg +11072.jpg +20922.jpg +9773.jpg +1885.jpg +27375.jpg +12339.jpg +8107.jpg +16869.jpg +9884.jpg +25107.jpg +19239.jpg +29559.jpg +26255.jpg +7578.jpg +29830.jpg +4399.jpg +424.jpg +14009.jpg +4195.jpg +29751.jpg +15236.jpg +27840.jpg +16340.jpg +16815.jpg +20967.jpg +17944.jpg +2831.jpg +1.jpg +24261.jpg +29072.jpg +15228.jpg +22867.jpg +25358.jpg +11493.jpg +1734.jpg +29265.jpg +17687.jpg +14123.jpg +2271.jpg +20523.jpg +7120.jpg +6455.jpg +26795.jpg +25834.jpg +17173.jpg +23227.jpg +8428.jpg +21779.jpg +16115.jpg +28225.jpg +27421.jpg +13049.jpg +4020.jpg +29865.jpg +28503.jpg +20441.jpg +29095.jpg +3543.jpg +15715.jpg +6151.jpg +27780.jpg +3589.jpg +8359.jpg +7612.jpg +206.jpg +15241.jpg +25251.jpg +6408.jpg +19965.jpg 
+26652.jpg +22057.jpg +22828.jpg +24079.jpg +18391.jpg +25453.jpg +25274.jpg +16039.jpg +3926.jpg +10964.jpg +11248.jpg +3149.jpg +14764.jpg +12904.jpg +3326.jpg +20144.jpg +13513.jpg +14326.jpg +3563.jpg +29188.jpg +18277.jpg +42.jpg +20395.jpg +12136.jpg +16588.jpg +23271.jpg +11470.jpg +22564.jpg +27868.jpg +20105.jpg +18935.jpg +26025.jpg +29287.jpg +24014.jpg +7620.jpg +25168.jpg +27512.jpg +10411.jpg +12848.jpg +20689.jpg +15424.jpg +1448.jpg +24179.jpg +26774.jpg +1129.jpg +18853.jpg +23319.jpg +8165.jpg +27095.jpg +18875.jpg +3094.jpg +8092.jpg +13369.jpg +1915.jpg +21309.jpg +10591.jpg +4757.jpg +10317.jpg +27122.jpg +27302.jpg +27661.jpg +19616.jpg +19985.jpg +11731.jpg +11453.jpg +26214.jpg +5488.jpg +20551.jpg +3195.jpg +25247.jpg +1386.jpg +12906.jpg +6021.jpg +18864.jpg +4618.jpg +8091.jpg +1909.jpg +29143.jpg +487.jpg +3685.jpg +18894.jpg +25936.jpg +17446.jpg +3075.jpg +1089.jpg +28133.jpg +15723.jpg +28672.jpg +4775.jpg +21723.jpg +7083.jpg +17674.jpg +16194.jpg +28032.jpg +26740.jpg +15006.jpg +141.jpg +22319.jpg +6089.jpg +28231.jpg +17125.jpg +5839.jpg +269.jpg +2079.jpg +17147.jpg +3821.jpg +10075.jpg +16515.jpg +14096.jpg +23589.jpg +14542.jpg +11242.jpg +18545.jpg +20426.jpg +6305.jpg +1650.jpg +29791.jpg +8340.jpg +16400.jpg +19939.jpg +23854.jpg +12116.jpg +28947.jpg +2372.jpg +28704.jpg +26063.jpg +26488.jpg +13477.jpg +22882.jpg +21348.jpg +1085.jpg +10860.jpg +10893.jpg +10803.jpg +11651.jpg +13892.jpg +19422.jpg +8213.jpg +21626.jpg +12717.jpg +23282.jpg +17433.jpg +7281.jpg +14751.jpg +11576.jpg +1553.jpg +23695.jpg +4674.jpg +25171.jpg +6548.jpg +3178.jpg +10724.jpg +15834.jpg +27548.jpg +2330.jpg +22544.jpg +674.jpg +26170.jpg +20345.jpg +20009.jpg +15321.jpg +12780.jpg +17418.jpg +3911.jpg +3161.jpg +13901.jpg +9073.jpg +11092.jpg +10270.jpg +16091.jpg +14520.jpg +3744.jpg +24501.jpg +6503.jpg +11655.jpg +16582.jpg +29511.jpg +7364.jpg +19335.jpg +29801.jpg +18416.jpg +21960.jpg +24896.jpg +12233.jpg +1603.jpg +6369.jpg +1172.jpg +19009.jpg +11233.jpg +11675.jpg +5615.jpg +5828.jpg +1631.jpg +4299.jpg +4125.jpg +10709.jpg +24088.jpg +9079.jpg +587.jpg +12668.jpg +23764.jpg +29863.jpg +17151.jpg +21825.jpg +5517.jpg +8697.jpg +5687.jpg +3080.jpg +17548.jpg +29943.jpg +21920.jpg +13687.jpg +14797.jpg +6715.jpg +6591.jpg +5994.jpg +20066.jpg +22495.jpg +10493.jpg +14136.jpg +11956.jpg +15649.jpg +1415.jpg +11750.jpg +4669.jpg +27009.jpg +3473.jpg +337.jpg +28275.jpg +25730.jpg +8802.jpg +13315.jpg +13979.jpg +427.jpg +2762.jpg +13404.jpg +22797.jpg +15936.jpg +29480.jpg +19011.jpg +20098.jpg +7002.jpg +198.jpg +26866.jpg +29756.jpg +5357.jpg +3055.jpg +18345.jpg +27580.jpg +15309.jpg +20675.jpg +10647.jpg +9583.jpg +19993.jpg +27473.jpg +6318.jpg +20055.jpg +22928.jpg +25324.jpg +18555.jpg +27719.jpg +24380.jpg +14318.jpg +6279.jpg +26818.jpg +15581.jpg +3621.jpg +20493.jpg +7278.jpg +2972.jpg +20532.jpg +3729.jpg +15432.jpg +24208.jpg +16397.jpg +10769.jpg +9991.jpg +9509.jpg +29007.jpg +19177.jpg +454.jpg +26482.jpg +12107.jpg +6391.jpg +2979.jpg +15856.jpg +21361.jpg +11951.jpg +21121.jpg +24818.jpg +10889.jpg +13959.jpg +28272.jpg +29651.jpg +9788.jpg +7809.jpg +25941.jpg +724.jpg +11297.jpg +15976.jpg +4329.jpg +4395.jpg +23855.jpg +22230.jpg +13226.jpg +28258.jpg +6097.jpg +26123.jpg +22136.jpg +11657.jpg +20549.jpg +19018.jpg +8346.jpg +18132.jpg +12969.jpg +7106.jpg +28555.jpg +19594.jpg +19143.jpg +10799.jpg +6694.jpg +24375.jpg +5049.jpg +24565.jpg +27576.jpg +11076.jpg +6106.jpg +26962.jpg +21061.jpg +23653.jpg +1689.jpg +22559.jpg +17478.jpg 
+19568.jpg +2946.jpg +17680.jpg +19201.jpg +2152.jpg +25783.jpg +5837.jpg +28433.jpg +22502.jpg +13248.jpg +5854.jpg +22909.jpg +26297.jpg +19845.jpg +22030.jpg +25140.jpg +18913.jpg +21605.jpg +25417.jpg +28123.jpg +17664.jpg +22956.jpg +13247.jpg +22499.jpg +3816.jpg +11948.jpg +21508.jpg +7993.jpg +14250.jpg +24938.jpg +25760.jpg +28136.jpg +9312.jpg +546.jpg +13657.jpg +15563.jpg +20127.jpg +13968.jpg +20393.jpg +10952.jpg +26068.jpg +25347.jpg +4401.jpg +11142.jpg +23259.jpg +9858.jpg +14729.jpg +3404.jpg +16840.jpg +29308.jpg +22243.jpg +4060.jpg +12687.jpg +6655.jpg +25353.jpg +10829.jpg +6631.jpg +20829.jpg +28662.jpg +27573.jpg +3375.jpg +26885.jpg +9355.jpg +27003.jpg +10248.jpg +16408.jpg +694.jpg +4189.jpg +12524.jpg +5353.jpg +22130.jpg +27406.jpg +6449.jpg +21236.jpg +22042.jpg +15307.jpg +9000.jpg +481.jpg +26802.jpg +9704.jpg +4572.jpg +26553.jpg +28470.jpg +23543.jpg +26560.jpg +615.jpg +26415.jpg +14784.jpg +25404.jpg +26098.jpg +11647.jpg +8882.jpg +7487.jpg +21724.jpg +4619.jpg +21923.jpg +22589.jpg +27778.jpg +19048.jpg +16116.jpg +15398.jpg +9931.jpg +17146.jpg +5494.jpg +1964.jpg +13501.jpg +25716.jpg +22358.jpg +28671.jpg +29359.jpg +4252.jpg +6487.jpg +15440.jpg +28480.jpg +2130.jpg +15818.jpg +1315.jpg +19301.jpg +10884.jpg +9902.jpg +20490.jpg +16156.jpg +4147.jpg +916.jpg +5312.jpg +26841.jpg +25544.jpg +17096.jpg +15725.jpg +23315.jpg +3855.jpg +693.jpg +25027.jpg +2489.jpg +20692.jpg +10627.jpg +3540.jpg +7611.jpg +13577.jpg +25233.jpg +12765.jpg +13730.jpg +18529.jpg +17009.jpg +29064.jpg +4228.jpg +3225.jpg +26711.jpg +2098.jpg +8084.jpg +9114.jpg +14964.jpg +9619.jpg +29380.jpg +8196.jpg +17570.jpg +9377.jpg +26138.jpg +13394.jpg +22752.jpg +16425.jpg +15657.jpg +5622.jpg +9920.jpg +18454.jpg +28942.jpg +13587.jpg +1775.jpg +7974.jpg +13349.jpg +22454.jpg +12192.jpg +22839.jpg +7485.jpg +8293.jpg +2647.jpg +6757.jpg +28325.jpg +19720.jpg +5000.jpg +29919.jpg +17725.jpg +2679.jpg +23184.jpg +23206.jpg +5525.jpg +26366.jpg +20132.jpg +24355.jpg +18424.jpg +14146.jpg +18465.jpg +10966.jpg +17806.jpg +6007.jpg +2236.jpg +19075.jpg +2429.jpg +24267.jpg +18234.jpg +12258.jpg +29587.jpg +29922.jpg +2290.jpg +1338.jpg +9343.jpg +27846.jpg +4357.jpg +22939.jpg +22236.jpg +7413.jpg +6586.jpg +11302.jpg +11996.jpg +15495.jpg +20738.jpg +20624.jpg +21812.jpg +26151.jpg +27064.jpg +8109.jpg +2758.jpg +11462.jpg +29818.jpg +9637.jpg +8561.jpg +9668.jpg +16623.jpg +1228.jpg +11182.jpg +12624.jpg +15202.jpg +24851.jpg +29536.jpg +13486.jpg +15939.jpg +3399.jpg +29227.jpg +19667.jpg +24224.jpg +23103.jpg +7883.jpg +7570.jpg +21623.jpg +1509.jpg +25248.jpg +11512.jpg +26437.jpg +10760.jpg +28460.jpg +4364.jpg +18632.jpg +17901.jpg +17301.jpg +29966.jpg +29959.jpg +7498.jpg +27557.jpg +29987.jpg +7132.jpg +6306.jpg +8586.jpg +17962.jpg +26881.jpg +14438.jpg +29400.jpg +15548.jpg +10293.jpg +14440.jpg +5824.jpg +15383.jpg +6334.jpg +12643.jpg +3717.jpg +23983.jpg +18146.jpg +22710.jpg +15491.jpg +23472.jpg +6859.jpg +6335.jpg +29484.jpg +23503.jpg +22983.jpg +26100.jpg +26956.jpg +4774.jpg +8019.jpg +17376.jpg +11949.jpg +7520.jpg +4928.jpg +5628.jpg +9035.jpg +21853.jpg +12493.jpg +13914.jpg +18700.jpg +27265.jpg +5007.jpg +20939.jpg +9872.jpg +4065.jpg +18097.jpg +1944.jpg +25662.jpg +10315.jpg +9611.jpg +5346.jpg +6072.jpg +6703.jpg +13850.jpg +2363.jpg +15408.jpg +22555.jpg +18905.jpg +10394.jpg +4276.jpg +28873.jpg +11040.jpg +13497.jpg +21548.jpg +17130.jpg +15801.jpg +22148.jpg +19115.jpg +5424.jpg +26344.jpg +14541.jpg +18283.jpg +22060.jpg +22104.jpg +25319.jpg 
+15911.jpg +3175.jpg +23608.jpg +24659.jpg +19399.jpg +16451.jpg +19455.jpg +7696.jpg +1522.jpg +13024.jpg +23049.jpg +5515.jpg +29531.jpg +623.jpg +28762.jpg +1935.jpg +3829.jpg +5756.jpg +11843.jpg +13233.jpg +11034.jpg +12164.jpg +10646.jpg +21659.jpg +14439.jpg +19523.jpg +21382.jpg +26588.jpg +10833.jpg +22464.jpg +24754.jpg +18298.jpg +4738.jpg +12071.jpg +21669.jpg +13706.jpg +23539.jpg +23244.jpg +21031.jpg +5441.jpg +13099.jpg +25722.jpg +262.jpg +9067.jpg +26809.jpg +846.jpg +28318.jpg +20947.jpg +1867.jpg +13991.jpg +27453.jpg +19749.jpg +1996.jpg +25336.jpg +6272.jpg +29799.jpg +26716.jpg +518.jpg +6879.jpg +10376.jpg +25330.jpg +23494.jpg +27899.jpg +28348.jpg +24026.jpg +23634.jpg +329.jpg +25394.jpg +2457.jpg +22792.jpg +6969.jpg +2387.jpg +995.jpg +17928.jpg +10389.jpg +10258.jpg +14284.jpg +24447.jpg +5695.jpg +5584.jpg +14757.jpg +28787.jpg +16577.jpg +1748.jpg +12743.jpg +22242.jpg +22591.jpg +20641.jpg +3835.jpg +11165.jpg +24773.jpg +1806.jpg +14495.jpg +21423.jpg +17852.jpg +22020.jpg +18786.jpg +2818.jpg +7812.jpg +12244.jpg +10390.jpg +2263.jpg +17910.jpg +27226.jpg +9679.jpg +18395.jpg +13322.jpg +1605.jpg +118.jpg +27000.jpg +9087.jpg +20525.jpg +7486.jpg +295.jpg +7772.jpg +5980.jpg +17201.jpg +14763.jpg +26857.jpg +12919.jpg +22800.jpg +24841.jpg +22569.jpg +25858.jpg +6197.jpg +21443.jpg +8518.jpg +5086.jpg +13829.jpg +17669.jpg +1312.jpg +25309.jpg +11048.jpg +8986.jpg +8031.jpg +11531.jpg +22772.jpg +29327.jpg +17070.jpg +9753.jpg +967.jpg +6283.jpg +17290.jpg +17098.jpg +11059.jpg +10875.jpg +26095.jpg +3814.jpg +5266.jpg +24884.jpg +19332.jpg +1189.jpg +26540.jpg +7737.jpg +28577.jpg +19076.jpg +21519.jpg +11578.jpg +28080.jpg +8329.jpg +1921.jpg +7534.jpg +24868.jpg +18184.jpg +14534.jpg +12767.jpg +11055.jpg +8237.jpg +6107.jpg +3097.jpg +27430.jpg +23413.jpg +5796.jpg +482.jpg +5181.jpg +23508.jpg +9091.jpg +5429.jpg +5072.jpg +10528.jpg +12558.jpg +10051.jpg +19495.jpg +1980.jpg +6672.jpg +5338.jpg +3694.jpg +2420.jpg +24930.jpg +8002.jpg +7187.jpg +26435.jpg +20876.jpg +3841.jpg +12074.jpg +27800.jpg +19141.jpg +607.jpg +19519.jpg +330.jpg +6183.jpg +19192.jpg +24394.jpg +4930.jpg +13491.jpg +3818.jpg +713.jpg +2859.jpg +7360.jpg +20054.jpg +19580.jpg +27240.jpg +10313.jpg +19097.jpg +27969.jpg +19521.jpg +16229.jpg +19900.jpg +22635.jpg +18527.jpg +18501.jpg +12796.jpg +1737.jpg +25769.jpg +3636.jpg +25861.jpg +12814.jpg +29292.jpg +14331.jpg +14548.jpg +26019.jpg +12314.jpg +10403.jpg +4453.jpg +17964.jpg +18152.jpg +2028.jpg +15848.jpg +1292.jpg +20481.jpg +23178.jpg +17915.jpg +1140.jpg +3433.jpg +11268.jpg +13498.jpg +24627.jpg +6003.jpg +574.jpg +7065.jpg +18991.jpg +4874.jpg +11173.jpg +27539.jpg +4934.jpg +6572.jpg +6204.jpg +17703.jpg +18053.jpg +10776.jpg +4099.jpg +23289.jpg +17616.jpg +4858.jpg +26999.jpg +21288.jpg +9871.jpg +19059.jpg +29129.jpg +4194.jpg +23848.jpg +5208.jpg +5514.jpg +29440.jpg +18353.jpg +18103.jpg +28129.jpg +20388.jpg +8507.jpg +26022.jpg +3401.jpg +29847.jpg +10550.jpg +20788.jpg +9584.jpg +20480.jpg +22213.jpg +7749.jpg +28338.jpg +12461.jpg +13155.jpg +27087.jpg +2419.jpg +21242.jpg +13746.jpg +29020.jpg +975.jpg +3290.jpg +9744.jpg +2216.jpg +29235.jpg +26468.jpg +19821.jpg +18709.jpg +18715.jpg +13552.jpg +10636.jpg +5876.jpg +24275.jpg +26299.jpg +12362.jpg +29232.jpg +14266.jpg +26209.jpg +27768.jpg +6257.jpg +13926.jpg +18794.jpg +11744.jpg +9369.jpg +6738.jpg +18020.jpg +5472.jpg +18498.jpg +26556.jpg +26903.jpg +10197.jpg +13352.jpg +25434.jpg +23563.jpg +27413.jpg +25261.jpg +17618.jpg +989.jpg +2736.jpg 
+2543.jpg +26785.jpg +22201.jpg +25726.jpg +19660.jpg +9436.jpg +18570.jpg +6450.jpg +27781.jpg +9766.jpg +19904.jpg +4323.jpg +12433.jpg +22960.jpg +957.jpg +4742.jpg +19322.jpg +29312.jpg +10122.jpg +12656.jpg +29107.jpg +21385.jpg +13224.jpg +3523.jpg +21485.jpg +670.jpg +6300.jpg +15877.jpg +4500.jpg +27577.jpg +17523.jpg +25825.jpg +29376.jpg +7666.jpg +529.jpg +15868.jpg +178.jpg +5411.jpg +25712.jpg +6641.jpg +7684.jpg +6462.jpg +17614.jpg +7504.jpg +6581.jpg +15428.jpg +17398.jpg +21402.jpg +20179.jpg +11086.jpg +28643.jpg +7104.jpg +3626.jpg +7964.jpg +7355.jpg +9608.jpg +19964.jpg +28793.jpg +15027.jpg +28832.jpg +83.jpg +14799.jpg +7978.jpg +3388.jpg +18459.jpg +3879.jpg +27129.jpg +18659.jpg +17456.jpg +17817.jpg +28828.jpg +23306.jpg +10649.jpg +22563.jpg +14650.jpg +13272.jpg +6956.jpg +6541.jpg +1142.jpg +7169.jpg +6972.jpg +14680.jpg +23035.jpg +13605.jpg +14142.jpg +5823.jpg +26729.jpg +28079.jpg +24653.jpg +28395.jpg +3337.jpg +745.jpg +3950.jpg +18477.jpg +5946.jpg +27852.jpg +17609.jpg +1626.jpg +28443.jpg +26670.jpg +24820.jpg +26937.jpg +13147.jpg +18323.jpg +20526.jpg +3972.jpg +19701.jpg +22374.jpg +26572.jpg +29013.jpg +5850.jpg +15752.jpg +2376.jpg +16336.jpg +9741.jpg +11704.jpg +8296.jpg +13720.jpg +11620.jpg +2769.jpg +24265.jpg +19755.jpg +3344.jpg +7156.jpg +6601.jpg +14173.jpg +17540.jpg +27369.jpg +19400.jpg +17615.jpg +20524.jpg +13006.jpg +11101.jpg +5544.jpg +9248.jpg +28306.jpg +23213.jpg +11357.jpg +9970.jpg +18936.jpg +24683.jpg +9051.jpg +11636.jpg +990.jpg +20114.jpg +14912.jpg +15620.jpg +8633.jpg +17153.jpg +26107.jpg +24015.jpg +27247.jpg +29666.jpg +23917.jpg +26065.jpg +19329.jpg +1417.jpg +23001.jpg +6294.jpg +29547.jpg +16718.jpg +6161.jpg +4466.jpg +20555.jpg +17905.jpg +15101.jpg +1217.jpg +20157.jpg +27255.jpg +14169.jpg +26827.jpg +21930.jpg +9448.jpg +6142.jpg +27479.jpg +9182.jpg +1532.jpg +8963.jpg +9999.jpg +26704.jpg +29940.jpg +0.jpg +8523.jpg +15577.jpg +52.jpg +21881.jpg +10822.jpg +23009.jpg +3068.jpg +28773.jpg +20576.jpg +11294.jpg +8088.jpg +29166.jpg +19324.jpg +16659.jpg +12961.jpg +751.jpg +24903.jpg +10544.jpg +16729.jpg +1077.jpg +6866.jpg +19313.jpg +25272.jpg +2046.jpg +15302.jpg +26411.jpg +29632.jpg +27469.jpg +23443.jpg +13522.jpg +7341.jpg +9443.jpg +6002.jpg +22372.jpg +26656.jpg +20982.jpg +16860.jpg +23568.jpg +18072.jpg +19131.jpg +1158.jpg +10605.jpg +1102.jpg +11035.jpg +21128.jpg +11990.jpg +26007.jpg +11413.jpg +9112.jpg +3092.jpg +1947.jpg +17149.jpg +16365.jpg +15821.jpg +2035.jpg +2970.jpg +29159.jpg +6270.jpg +12611.jpg +4283.jpg +3041.jpg +10062.jpg +4783.jpg +29141.jpg +16834.jpg +18054.jpg +18062.jpg +6529.jpg +8880.jpg +27967.jpg +18506.jpg +16876.jpg +6415.jpg +9020.jpg +18036.jpg +11220.jpg +4070.jpg +25925.jpg +18475.jpg +9548.jpg +5407.jpg +7963.jpg +2907.jpg +6622.jpg +13957.jpg +15792.jpg +28864.jpg +2544.jpg +9495.jpg +26959.jpg +22406.jpg +15611.jpg +20513.jpg +3165.jpg +20798.jpg +14859.jpg +26825.jpg +17713.jpg +6230.jpg +14594.jpg +13015.jpg +12001.jpg +11503.jpg +25490.jpg +22557.jpg +17277.jpg +20586.jpg +19902.jpg +24990.jpg +5616.jpg +20612.jpg +17271.jpg +2123.jpg +16341.jpg +9742.jpg +21084.jpg +11279.jpg +28997.jpg +6663.jpg +1925.jpg +23771.jpg +21235.jpg +28624.jpg +8280.jpg +28586.jpg +6226.jpg +27186.jpg +9666.jpg +463.jpg +7845.jpg +24626.jpg +27748.jpg +16278.jpg +22533.jpg +10596.jpg +6744.jpg +10573.jpg +20863.jpg +21100.jpg +9938.jpg +10832.jpg +17140.jpg +3301.jpg +27401.jpg +10226.jpg +13231.jpg +18388.jpg +6743.jpg +27416.jpg +24086.jpg +7675.jpg +14364.jpg +29491.jpg 
+27569.jpg +9074.jpg +13277.jpg +17777.jpg +4224.jpg +3606.jpg +1932.jpg +1044.jpg +14303.jpg +15367.jpg +16452.jpg +2389.jpg +23041.jpg +16740.jpg +15945.jpg +6684.jpg +11569.jpg +10070.jpg +20604.jpg +7525.jpg +26537.jpg +16634.jpg +20208.jpg +29968.jpg +12286.jpg +15598.jpg +14409.jpg +18151.jpg +12048.jpg +26242.jpg +3860.jpg +2128.jpg +5240.jpg +26810.jpg +15135.jpg +2837.jpg +21123.jpg +27493.jpg +20173.jpg +28720.jpg +10370.jpg +13291.jpg +3955.jpg +23403.jpg +9596.jpg +10625.jpg +15883.jpg +25402.jpg +5148.jpg +28279.jpg +6835.jpg +18959.jpg +23410.jpg +18435.jpg +27251.jpg +8960.jpg +21661.jpg +28778.jpg +27201.jpg +16197.jpg +11436.jpg +24858.jpg +23661.jpg +21217.jpg +22924.jpg +7470.jpg +27070.jpg +14424.jpg +25704.jpg +11848.jpg +22390.jpg +22627.jpg +17510.jpg +23052.jpg +3059.jpg +21463.jpg +18284.jpg +492.jpg +27936.jpg +14360.jpg +12187.jpg +29889.jpg +24835.jpg +6910.jpg +10950.jpg +15524.jpg +4984.jpg +11135.jpg +15642.jpg +14166.jpg +5273.jpg +6918.jpg +3600.jpg +4584.jpg +23851.jpg +13940.jpg +13738.jpg +3994.jpg +19312.jpg +14309.jpg +28067.jpg +20039.jpg +13613.jpg +28005.jpg +5413.jpg +965.jpg +926.jpg +14488.jpg +21952.jpg +5934.jpg +553.jpg +9650.jpg +11885.jpg +1771.jpg +1212.jpg +4084.jpg +14484.jpg +24037.jpg +19428.jpg +17744.jpg +19375.jpg +3593.jpg +20861.jpg +14229.jpg +18374.jpg +27170.jpg +1533.jpg +29844.jpg +24044.jpg +16482.jpg +931.jpg +22033.jpg +21286.jpg +425.jpg +8611.jpg +24057.jpg +18803.jpg +17695.jpg +4990.jpg +28534.jpg +10368.jpg +6922.jpg +1702.jpg +591.jpg +7440.jpg +17634.jpg +17793.jpg +16168.jpg +28237.jpg +29643.jpg +20857.jpg +24080.jpg +13781.jpg +26624.jpg +19906.jpg +21740.jpg +12509.jpg +743.jpg +7602.jpg +7340.jpg +7308.jpg +14836.jpg +6799.jpg +28463.jpg +5125.jpg +3069.jpg +25828.jpg +17210.jpg +27593.jpg +15024.jpg +8517.jpg +24761.jpg +14225.jpg +16794.jpg +13889.jpg +25871.jpg +5196.jpg +29427.jpg +5696.jpg +964.jpg +17497.jpg +2316.jpg +13529.jpg +10744.jpg +17502.jpg +5458.jpg +25702.jpg +5422.jpg +5173.jpg +20884.jpg +25451.jpg +16607.jpg +21314.jpg +15075.jpg +29727.jpg +23884.jpg +9570.jpg +27300.jpg +9810.jpg +17575.jpg +15893.jpg +26520.jpg +5431.jpg +10175.jpg +7194.jpg +27326.jpg +25075.jpg +15840.jpg +7966.jpg +9588.jpg +15206.jpg +1707.jpg +15903.jpg +15258.jpg +22733.jpg +12911.jpg +25159.jpg +17144.jpg +28788.jpg +9454.jpg +14193.jpg +3745.jpg +12562.jpg +20737.jpg +26930.jpg +29644.jpg +10532.jpg +15688.jpg +26244.jpg +25212.jpg +20271.jpg +12364.jpg +5656.jpg +24701.jpg +18113.jpg +914.jpg +28826.jpg +3828.jpg +4209.jpg +25645.jpg +26891.jpg +29825.jpg +16188.jpg +11061.jpg +16348.jpg +25942.jpg +14816.jpg +6268.jpg +4176.jpg +27793.jpg +20694.jpg +4334.jpg +2017.jpg +11128.jpg +4927.jpg +6512.jpg +870.jpg +9759.jpg +24219.jpg +18590.jpg +16207.jpg +1184.jpg +12252.jpg +7797.jpg +15950.jpg +28437.jpg +20479.jpg +28177.jpg +27752.jpg +13703.jpg +24397.jpg +384.jpg +16727.jpg +29316.jpg +21629.jpg +28065.jpg +18882.jpg +23356.jpg +12312.jpg +20843.jpg +27670.jpg +18840.jpg +9652.jpg +1882.jpg +26224.jpg +8016.jpg +9717.jpg +9229.jpg +7882.jpg +8139.jpg +18684.jpg +2191.jpg +28495.jpg +9994.jpg +2087.jpg +272.jpg +1026.jpg +7590.jpg +14635.jpg +10669.jpg +6123.jpg +16955.jpg +22783.jpg +2913.jpg +19030.jpg +26976.jpg +15575.jpg +7580.jpg +455.jpg +16250.jpg +20390.jpg +20125.jpg +13243.jpg +25780.jpg +25476.jpg +26604.jpg +15325.jpg +20311.jpg +11973.jpg +1993.jpg +22357.jpg +21982.jpg +5912.jpg +23197.jpg +25142.jpg +26247.jpg +6881.jpg +12923.jpg +21656.jpg +24894.jpg +16458.jpg +22528.jpg +17855.jpg +14155.jpg 
+2993.jpg +29771.jpg +22324.jpg +7288.jpg +13956.jpg +20235.jpg +29214.jpg +11367.jpg +124.jpg +24723.jpg +7768.jpg +880.jpg +15833.jpg +7655.jpg +22880.jpg +11746.jpg +7880.jpg +19220.jpg +769.jpg +24972.jpg +11195.jpg +29133.jpg +12993.jpg +26024.jpg +18342.jpg +8198.jpg +13481.jpg +27836.jpg +16169.jpg +27812.jpg +24874.jpg +22750.jpg +23502.jpg +1237.jpg +10570.jpg +14678.jpg +1660.jpg +2203.jpg +17191.jpg +7630.jpg +9263.jpg +147.jpg +10535.jpg +19100.jpg +7884.jpg +27619.jpg +22878.jpg +8522.jpg +11490.jpg +12516.jpg +9979.jpg +28954.jpg +25997.jpg +20313.jpg +7624.jpg +14894.jpg +27769.jpg +15886.jpg +12100.jpg +19868.jpg +18938.jpg +12212.jpg +18732.jpg +23836.jpg +23560.jpg +3885.jpg +19790.jpg +20696.jpg +10926.jpg +11804.jpg +5574.jpg +27920.jpg +4843.jpg +1972.jpg +9517.jpg +978.jpg +25638.jpg +10095.jpg +21432.jpg +12162.jpg +25426.jpg +7861.jpg +29783.jpg +8964.jpg +18721.jpg +8947.jpg +3387.jpg +12430.jpg +4097.jpg +28238.jpg +29657.jpg +4795.jpg +1934.jpg +23564.jpg +6960.jpg +15413.jpg +14869.jpg +7732.jpg +23042.jpg +29779.jpg +13342.jpg +16910.jpg +26966.jpg +6482.jpg +16080.jpg +2559.jpg +17450.jpg +12843.jpg +12075.jpg +17690.jpg +14134.jpg +17244.jpg +14351.jpg +3832.jpg +8398.jpg +23796.jpg +18434.jpg +28362.jpg +20447.jpg +939.jpg +2138.jpg +5562.jpg +3248.jpg +16372.jpg +27372.jpg +22368.jpg +17051.jpg +20865.jpg +14692.jpg +25135.jpg +27645.jpg +4479.jpg +24805.jpg +22691.jpg +991.jpg +8929.jpg +15940.jpg +16173.jpg +660.jpg +21765.jpg +10215.jpg +12557.jpg +10645.jpg +18820.jpg +14579.jpg +7507.jpg +10234.jpg +12384.jpg +27283.jpg +24338.jpg +22137.jpg +5704.jpg +3909.jpg +18247.jpg +11038.jpg +9777.jpg +27243.jpg +28902.jpg +25792.jpg +22894.jpg +12889.jpg +2377.jpg +18290.jpg +27046.jpg +18728.jpg +253.jpg +151.jpg +2952.jpg +28605.jpg +11391.jpg +20159.jpg +6410.jpg +1053.jpg +25376.jpg +11504.jpg +19306.jpg +16201.jpg +13345.jpg +19134.jpg +9344.jpg +11344.jpg +3372.jpg +1110.jpg +12214.jpg +7499.jpg +8432.jpg +3721.jpg +11355.jpg +5806.jpg +16986.jpg +10862.jpg +27519.jpg +26370.jpg +21226.jpg +22626.jpg +29595.jpg +22693.jpg +9330.jpg +24164.jpg +9604.jpg +14988.jpg +10027.jpg +6976.jpg +26663.jpg +28883.jpg +9739.jpg +23381.jpg +25235.jpg +616.jpg +14800.jpg +17401.jpg +25056.jpg +6810.jpg +26591.jpg +19669.jpg +6102.jpg +5642.jpg +27500.jpg +16283.jpg +14998.jpg +27123.jpg +20538.jpg +8454.jpg +19831.jpg +24423.jpg +9675.jpg +12346.jpg +3688.jpg +29307.jpg +1253.jpg +13169.jpg +5974.jpg +8842.jpg +10108.jpg +8301.jpg +5501.jpg +23465.jpg +21909.jpg +18504.jpg +12600.jpg +17675.jpg +10174.jpg +8446.jpg +19002.jpg +9046.jpg +23647.jpg +14433.jpg +12009.jpg +24786.jpg +24687.jpg +10937.jpg +2168.jpg +22156.jpg +19174.jpg +13754.jpg +22917.jpg +4900.jpg +17268.jpg +12582.jpg +24029.jpg +11938.jpg +12900.jpg +11487.jpg +28299.jpg +9661.jpg +20038.jpg +2109.jpg +6576.jpg +14706.jpg +26712.jpg +29913.jpg +11339.jpg +7441.jpg +20622.jpg +9100.jpg +20354.jpg +11802.jpg +7942.jpg +17629.jpg +2902.jpg +26876.jpg +23159.jpg +8371.jpg +18084.jpg +19522.jpg +6174.jpg +3058.jpg +4437.jpg +10131.jpg +3772.jpg +16773.jpg +13579.jpg +434.jpg +29147.jpg +6012.jpg +14825.jpg +6289.jpg +25802.jpg +3450.jpg +28623.jpg +20346.jpg +9185.jpg +29774.jpg +29001.jpg +12802.jpg +26483.jpg +13503.jpg +2331.jpg +739.jpg +24372.jpg +29695.jpg +29976.jpg +3953.jpg +12505.jpg +18023.jpg +28747.jpg +16499.jpg +27347.jpg +28782.jpg +783.jpg +24569.jpg +18245.jpg +4602.jpg +1620.jpg +26530.jpg +8904.jpg +1821.jpg +18209.jpg +5256.jpg +3489.jpg +22273.jpg +15726.jpg +17297.jpg +15755.jpg 
+4534.jpg +18488.jpg +8790.jpg +19873.jpg +1749.jpg +7720.jpg +6418.jpg +4682.jpg +13590.jpg +6492.jpg +15457.jpg +6216.jpg +2566.jpg +14861.jpg +19870.jpg +2129.jpg +19182.jpg +17186.jpg +21097.jpg +6639.jpg +4647.jpg +24176.jpg +18939.jpg +12803.jpg +3001.jpg +5239.jpg +25214.jpg +23968.jpg +18363.jpg +19671.jpg +17105.jpg +15894.jpg +28980.jpg +21583.jpg +24840.jpg +27172.jpg +21274.jpg +7667.jpg +1191.jpg +29638.jpg +27532.jpg +18315.jpg +11024.jpg +16712.jpg +2781.jpg +20783.jpg +17730.jpg +16067.jpg +4133.jpg +12196.jpg +13068.jpg +7940.jpg +5770.jpg +21223.jpg +19283.jpg +7376.jpg +12067.jpg +18452.jpg +28770.jpg +8110.jpg +10863.jpg +18313.jpg +17442.jpg +7877.jpg +12019.jpg +15665.jpg +3538.jpg +3202.jpg +16958.jpg +10699.jpg +17991.jpg +4700.jpg +10879.jpg +21687.jpg +287.jpg +11216.jpg +7701.jpg +28591.jpg +26745.jpg +197.jpg +16164.jpg +24000.jpg +8168.jpg +6246.jpg +22199.jpg +29647.jpg +10956.jpg +4081.jpg +15459.jpg +17243.jpg +24715.jpg +17808.jpg +9010.jpg +15892.jpg +28378.jpg +13710.jpg +26967.jpg +29042.jpg +23222.jpg +563.jpg +1556.jpg +15867.jpg +15567.jpg +10371.jpg +10609.jpg +4370.jpg +11999.jpg +26983.jpg +20163.jpg +15257.jpg +26928.jpg +17875.jpg +13466.jpg +3096.jpg +13812.jpg +22316.jpg +22636.jpg +21240.jpg +27227.jpg +20467.jpg +15790.jpg +13894.jpg +19429.jpg +25699.jpg +24970.jpg +29266.jpg +28574.jpg +6061.jpg +24148.jpg +855.jpg +27220.jpg +17049.jpg +20414.jpg +21882.jpg +24252.jpg +15084.jpg +16266.jpg +2030.jpg +6546.jpg +10012.jpg +7608.jpg +8723.jpg +10973.jpg +15545.jpg +23578.jpg +16295.jpg +6528.jpg +15022.jpg +16888.jpg +19967.jpg +21933.jpg +12648.jpg +5802.jpg +18070.jpg +3089.jpg +4067.jpg +17361.jpg +8134.jpg +10218.jpg +25646.jpg +13216.jpg +5859.jpg +23703.jpg +16358.jpg +24021.jpg +4603.jpg +12418.jpg +28803.jpg +28543.jpg +16810.jpg +17078.jpg +29251.jpg +9779.jpg +28901.jpg +25786.jpg +23498.jpg +5113.jpg +21398.jpg +26815.jpg +13146.jpg +26861.jpg +25234.jpg +143.jpg +26927.jpg +22497.jpg +27149.jpg +3794.jpg +13688.jpg +598.jpg +25127.jpg +20976.jpg +653.jpg +15500.jpg +12143.jpg +15562.jpg +4673.jpg +9155.jpg +21562.jpg +18685.jpg +3531.jpg +27758.jpg +21509.jpg +858.jpg +10531.jpg +10557.jpg +3838.jpg +6975.jpg +22159.jpg +3349.jpg +19088.jpg +13048.jpg +12125.jpg +23790.jpg +1888.jpg +18446.jpg +13152.jpg +9181.jpg +12126.jpg +9828.jpg +9874.jpg +29320.jpg +29236.jpg +19859.jpg +28372.jpg +16027.jpg +13040.jpg +11299.jpg +14293.jpg +18692.jpg +27811.jpg +15051.jpg +9125.jpg +26608.jpg +4187.jpg +19574.jpg +13239.jpg +4770.jpg +13492.jpg +473.jpg +20636.jpg +17274.jpg +28476.jpg +4951.jpg +2164.jpg +16354.jpg +24515.jpg +1970.jpg +8743.jpg +27952.jpg +17624.jpg +2431.jpg +26066.jpg +16714.jpg +1961.jpg +18899.jpg +11639.jpg +1152.jpg +10512.jpg +23743.jpg +11424.jpg +20954.jpg +2400.jpg +12115.jpg +6532.jpg +1309.jpg +8049.jpg +24567.jpg +14379.jpg +25911.jpg +28421.jpg +20966.jpg +691.jpg +8276.jpg +5878.jpg +13890.jpg +6728.jpg +11953.jpg +14496.jpg +8619.jpg +27106.jpg +11707.jpg +13749.jpg +5252.jpg +17828.jpg +20077.jpg +11719.jpg +16306.jpg +14022.jpg +9794.jpg +28618.jpg +4857.jpg +169.jpg +20258.jpg +15557.jpg +27016.jpg +23486.jpg +4868.jpg +24691.jpg +12202.jpg +960.jpg +21234.jpg +23391.jpg +18958.jpg +11920.jpg +27093.jpg +6917.jpg +16342.jpg +9654.jpg +2033.jpg +19396.jpg +10013.jpg +1871.jpg +20725.jpg +5132.jpg +3431.jpg +8391.jpg +29523.jpg +19543.jpg +14283.jpg +17974.jpg +14516.jpg +21650.jpg +9698.jpg +9747.jpg +26377.jpg +10876.jpg +5289.jpg +13380.jpg +22278.jpg +10806.jpg +22574.jpg +25515.jpg +24724.jpg 
+19299.jpg +7444.jpg +10213.jpg +27892.jpg +15837.jpg +19398.jpg +15262.jpg +13938.jpg +29198.jpg +5297.jpg +15204.jpg +4332.jpg +27357.jpg +14122.jpg +17027.jpg +25922.jpg +6433.jpg +10505.jpg +19604.jpg +16422.jpg +20793.jpg +4113.jpg +29390.jpg +26052.jpg +28759.jpg +28025.jpg +13459.jpg +17789.jpg +3331.jpg +28533.jpg +23137.jpg +27274.jpg +9407.jpg +5386.jpg +10402.jpg +26327.jpg +12970.jpg +21279.jpg +29752.jpg +27598.jpg +23834.jpg +24669.jpg +16055.jpg +16125.jpg +25110.jpg +15802.jpg +6661.jpg +28615.jpg +14913.jpg +24297.jpg +1732.jpg +12992.jpg +18201.jpg +26088.jpg +5905.jpg +9069.jpg +2375.jpg +19861.jpg +12095.jpg +10005.jpg +4965.jpg +26325.jpg +9059.jpg +12821.jpg +12043.jpg +19054.jpg +681.jpg +6271.jpg +13182.jpg +27309.jpg +8810.jpg +22761.jpg +18015.jpg +1463.jpg +20092.jpg +4876.jpg +27499.jpg +23111.jpg +29720.jpg +10130.jpg +20156.jpg +1159.jpg +18812.jpg +25801.jpg +1516.jpg +17334.jpg +9624.jpg +25386.jpg +13649.jpg +23698.jpg +18625.jpg +21934.jpg +13156.jpg +17857.jpg +25158.jpg +15587.jpg +24959.jpg +1736.jpg +11840.jpg +1055.jpg +23895.jpg +26441.jpg +3461.jpg +12073.jpg +11836.jpg +3830.jpg +2558.jpg +29075.jpg +27819.jpg +12405.jpg +7081.jpg +20240.jpg +29928.jpg +29414.jpg +18944.jpg +6872.jpg +27296.jpg +3333.jpg +29646.jpg +19747.jpg +29916.jpg +25092.jpg +12979.jpg +22866.jpg +26091.jpg +17971.jpg +22656.jpg +14230.jpg +4467.jpg +4571.jpg +11705.jpg +20871.jpg +7417.jpg +8666.jpg +14001.jpg +27090.jpg +1379.jpg +22914.jpg +26931.jpg +14469.jpg +8244.jpg +22017.jpg +13580.jpg +28064.jpg +3475.jpg +22868.jpg +12981.jpg +12123.jpg +10980.jpg +9457.jpg +25300.jpg +13253.jpg +12008.jpg +17820.jpg +18523.jpg +6323.jpg +8729.jpg +10480.jpg +16405.jpg +1381.jpg +21857.jpg +5486.jpg +22871.jpg +14918.jpg +21166.jpg +5258.jpg +29419.jpg +21030.jpg +23845.jpg +29866.jpg +21865.jpg +12278.jpg +2537.jpg +15685.jpg +8972.jpg +19509.jpg +9617.jpg +8183.jpg +1032.jpg +5860.jpg +7379.jpg +14006.jpg +14386.jpg +20880.jpg +2299.jpg +8180.jpg +16378.jpg +19013.jpg +4254.jpg +23787.jpg +23069.jpg +4007.jpg +5440.jpg +5001.jpg +8900.jpg +15999.jpg +29465.jpg +1224.jpg +22235.jpg +9378.jpg +23547.jpg +6991.jpg +12466.jpg +6818.jpg +11797.jpg +28928.jpg +2039.jpg +12845.jpg +2458.jpg +14077.jpg +2086.jpg +11132.jpg +21987.jpg +14162.jpg +29392.jpg +5748.jpg +22339.jpg +92.jpg +29635.jpg +14732.jpg +23789.jpg +23007.jpg +6962.jpg +15730.jpg +19352.jpg +21993.jpg +21685.jpg +25400.jpg +13387.jpg +28607.jpg +27932.jpg +25307.jpg +25919.jpg +12622.jpg +17285.jpg +4414.jpg +5623.jpg +10617.jpg +25223.jpg +16088.jpg +16709.jpg +25627.jpg +17956.jpg +16239.jpg +1781.jpg +23235.jpg +14064.jpg +22321.jpg +3282.jpg +1441.jpg +4911.jpg +13778.jpg +14821.jpg +18373.jpg +24180.jpg +4512.jpg +12331.jpg +22900.jpg +13984.jpg +3095.jpg +17355.jpg +24369.jpg +16752.jpg +29385.jpg +3801.jpg +19629.jpg +131.jpg +803.jpg +2845.jpg +12163.jpg +12299.jpg +25864.jpg +23959.jpg +5016.jpg +28930.jpg +12210.jpg +2042.jpg +6203.jpg +13839.jpg +28915.jpg +12110.jpg +10886.jpg +25001.jpg +7258.jpg +3338.jpg +10287.jpg +27718.jpg +26181.jpg +2495.jpg +3483.jpg +22161.jpg +26002.jpg +255.jpg +26078.jpg +9450.jpg +1462.jpg +7999.jpg +19195.jpg +12245.jpg +7023.jpg +27649.jpg +8055.jpg +18857.jpg +19279.jpg +3610.jpg +15625.jpg +20777.jpg +9437.jpg +7992.jpg +16226.jpg +19666.jpg +392.jpg +16502.jpg +14699.jpg +13093.jpg +27949.jpg +23629.jpg +10429.jpg +9841.jpg +14798.jpg +29548.jpg +9249.jpg +25611.jpg +29271.jpg +16170.jpg +23904.jpg +9326.jpg +9351.jpg +24090.jpg +17628.jpg +12585.jpg +12596.jpg +20382.jpg 
+26946.jpg +21269.jpg +9066.jpg +11588.jpg +18622.jpg +24860.jpg +15073.jpg +24899.jpg +9575.jpg +14365.jpg +9607.jpg +15055.jpg +14577.jpg +1316.jpg +17213.jpg +29777.jpg +16981.jpg +21633.jpg +24951.jpg +29555.jpg +25521.jpg +15094.jpg +24663.jpg +17060.jpg +20337.jpg +3980.jpg +22954.jpg +3602.jpg +26524.jpg +29151.jpg +8123.jpg +20760.jpg +1783.jpg +21703.jpg +14406.jpg +28447.jpg +17335.jpg +23932.jpg +25831.jpg +24110.jpg +21795.jpg +28469.jpg +19590.jpg +29717.jpg +4010.jpg +25725.jpg +185.jpg +5114.jpg +1403.jpg +29176.jpg +15617.jpg +21063.jpg +25403.jpg +15993.jpg +24210.jpg +28321.jpg +4817.jpg +9062.jpg +3705.jpg +20790.jpg +11002.jpg +28147.jpg +13578.jpg +19969.jpg +4199.jpg +29719.jpg +8240.jpg +16172.jpg +10896.jpg +2516.jpg +24798.jpg +28402.jpg +11145.jpg +8397.jpg +17747.jpg +14416.jpg +28996.jpg +14490.jpg +10000.jpg +11831.jpg +303.jpg +13641.jpg +11914.jpg +15727.jpg +9404.jpg +1718.jpg +28471.jpg +20774.jpg +28819.jpg +24832.jpg +7114.jpg +28764.jpg +14553.jpg +19829.jpg +13571.jpg +1667.jpg +4924.jpg +13620.jpg +27237.jpg +24800.jpg +27986.jpg +16086.jpg +1012.jpg +2065.jpg +8974.jpg +14851.jpg +8337.jpg +6124.jpg +12046.jpg +11842.jpg +17861.jpg +9473.jpg +11743.jpg +23374.jpg +16076.jpg +17768.jpg +9329.jpg +9527.jpg +27390.jpg +5287.jpg +26728.jpg +7782.jpg +6931.jpg +24932.jpg +18311.jpg +14442.jpg +1226.jpg +21168.jpg +10339.jpg +12429.jpg +8164.jpg +16661.jpg +1555.jpg +18681.jpg +18861.jpg +19545.jpg +315.jpg +13911.jpg +23943.jpg +5320.jpg +10905.jpg +29573.jpg +8647.jpg +25380.jpg +20251.jpg +16380.jpg +5662.jpg +3390.jpg +29404.jpg +10740.jpg +14783.jpg +19803.jpg +16923.jpg +15076.jpg +3090.jpg +8033.jpg +9303.jpg +25878.jpg +5106.jpg +27277.jpg +19338.jpg +13121.jpg +18947.jpg +4471.jpg +16698.jpg +20598.jpg +23619.jpg +14179.jpg +12589.jpg +15808.jpg +16285.jpg +20249.jpg +22951.jpg +21212.jpg +27214.jpg +4986.jpg +13842.jpg +16651.jpg +8724.jpg +13685.jpg +16669.jpg +17081.jpg +787.jpg +11078.jpg +20413.jpg +15368.jpg +19448.jpg +4546.jpg +15601.jpg +8538.jpg +25311.jpg +1512.jpg +2274.jpg +28363.jpg +7760.jpg +6341.jpg +6580.jpg +2403.jpg +8377.jpg +21377.jpg +2009.jpg +20085.jpg +13629.jpg +19385.jpg +23998.jpg +10595.jpg +17739.jpg +18704.jpg +29835.jpg +839.jpg +6669.jpg +19793.jpg +4606.jpg +26150.jpg +3287.jpg +5172.jpg +12518.jpg +15091.jpg +11598.jpg +8440.jpg +23034.jpg +17979.jpg +4761.jpg +10589.jpg +9796.jpg +6561.jpg +17866.jpg +24321.jpg +17635.jpg +22183.jpg +15172.jpg +9951.jpg +28467.jpg +26476.jpg +29640.jpg +7663.jpg +21036.jpg +1546.jpg +29378.jpg +27714.jpg +17223.jpg +14017.jpg +26567.jpg +17983.jpg +11188.jpg +14590.jpg +23395.jpg +20739.jpg +18439.jpg +18726.jpg +16471.jpg +19099.jpg +3740.jpg +14960.jpg +24890.jpg +29792.jpg +15732.jpg +4286.jpg +17162.jpg +27384.jpg +23745.jpg +3448.jpg +5556.jpg +5082.jpg +14082.jpg +23083.jpg +19781.jpg +15541.jpg +5145.jpg +10267.jpg +21055.jpg +11774.jpg +14033.jpg +21445.jpg +17104.jpg +11919.jpg +1847.jpg +18687.jpg +14307.jpg +3060.jpg +24233.jpg +15054.jpg +2004.jpg +25985.jpg +11528.jpg +25766.jpg +16356.jpg +25729.jpg +13069.jpg +18461.jpg +19989.jpg +6957.jpg +2935.jpg +11830.jpg +2854.jpg +6524.jpg +3270.jpg +27258.jpg +24002.jpg +27802.jpg +11498.jpg +12010.jpg +8013.jpg +4025.jpg +3751.jpg +18856.jpg +5064.jpg +25867.jpg +28635.jpg +4599.jpg +19699.jpg +6552.jpg +29729.jpg +14745.jpg +11354.jpg +22131.jpg +4307.jpg +8228.jpg +2153.jpg +22125.jpg +21069.jpg +2226.jpg +10330.jpg +5638.jpg +2425.jpg +5286.jpg +4208.jpg +27567.jpg +29870.jpg +12908.jpg +18007.jpg +22661.jpg 
+10325.jpg +7566.jpg +342.jpg +7695.jpg +12891.jpg +2715.jpg +17579.jpg +7586.jpg +15514.jpg +3208.jpg +8355.jpg +14815.jpg +11601.jpg +9621.jpg +21545.jpg +15327.jpg +20811.jpg +24981.jpg +11139.jpg +20867.jpg +13743.jpg +9201.jpg +13017.jpg +19417.jpg +9097.jpg +21601.jpg +20089.jpg +11684.jpg +23780.jpg +672.jpg +19583.jpg +5855.jpg +27662.jpg +23542.jpg +14841.jpg +29249.jpg +14181.jpg +21875.jpg +15105.jpg +1989.jpg +28202.jpg +27671.jpg +22648.jpg +14304.jpg +23765.jpg +11394.jpg +768.jpg +16573.jpg +23663.jpg +4850.jpg +22764.jpg +12186.jpg +254.jpg +8733.jpg +22743.jpg +17474.jpg +11790.jpg +28440.jpg +2392.jpg +20605.jpg +28568.jpg +20403.jpg +16461.jpg +28933.jpg +21229.jpg +7127.jpg +23537.jpg +5710.jpg +25295.jpg +1298.jpg +19978.jpg +13772.jpg +10205.jpg +17619.jpg +26230.jpg +28301.jpg +19928.jpg +14557.jpg +12564.jpg +15357.jpg +24402.jpg +3580.jpg +26523.jpg +12084.jpg +9821.jpg +26136.jpg +3690.jpg +7267.jpg +11337.jpg +20527.jpg +29384.jpg +17179.jpg +6029.jpg +13987.jpg +24521.jpg +22946.jpg +15901.jpg +13273.jpg +4226.jpg +16524.jpg +29447.jpg +28008.jpg +26854.jpg +10836.jpg +18114.jpg +8278.jpg +19633.jpg +25857.jpg +25115.jpg +8860.jpg +11901.jpg +26035.jpg +21115.jpg +23024.jpg +354.jpg +24352.jpg +24646.jpg +10652.jpg +21249.jpg +1805.jpg +17307.jpg +14504.jpg +27083.jpg +4238.jpg +29804.jpg +16771.jpg +29318.jpg +8644.jpg +16603.jpg +29955.jpg +18481.jpg +16618.jpg +16330.jpg +11420.jpg +2813.jpg +21103.jpg +5166.jpg +19610.jpg +20994.jpg +20518.jpg +2581.jpg +12086.jpg +15960.jpg +3746.jpg +15742.jpg +23840.jpg +14702.jpg +659.jpg +7593.jpg +5910.jpg +348.jpg +4124.jpg +1123.jpg +27156.jpg +12436.jpg +10198.jpg +15067.jpg +27112.jpg +103.jpg +8279.jpg +9136.jpg +6557.jpg +16095.jpg +20820.jpg +13560.jpg +25471.jpg +12103.jpg +8815.jpg +10374.jpg +24991.jpg +2927.jpg +13373.jpg +21665.jpg +3133.jpg +2517.jpg +14085.jpg +8030.jpg +12391.jpg +11758.jpg +26863.jpg +8318.jpg +22702.jpg +26968.jpg +26097.jpg +27629.jpg +9086.jpg +23310.jpg +17155.jpg +29204.jpg +13264.jpg +26936.jpg +2148.jpg +22623.jpg +4966.jpg +12980.jpg +12054.jpg +8551.jpg +98.jpg +20929.jpg +7943.jpg +7935.jpg +15728.jpg +9568.jpg +22841.jpg +24822.jpg +24130.jpg +8693.jpg +8653.jpg +23889.jpg +781.jpg +28062.jpg +18417.jpg +25589.jpg +2144.jpg +13495.jpg +10093.jpg +22779.jpg +29295.jpg +1113.jpg +1380.jpg +18643.jpg +11439.jpg +6045.jpg +2326.jpg +12698.jpg +18916.jpg +17403.jpg +238.jpg +15560.jpg +13041.jpg +25883.jpg +16318.jpg +6456.jpg +2940.jpg +8223.jpg +5831.jpg +8612.jpg +2510.jpg +16193.jpg +5955.jpg +15589.jpg +7831.jpg +17479.jpg +20102.jpg +18131.jpg +18047.jpg +1210.jpg +20874.jpg +25573.jpg +14128.jpg +5011.jpg +13909.jpg +4922.jpg +27245.jpg +14156.jpg +2406.jpg +6696.jpg +14060.jpg +12878.jpg +10800.jpg +14306.jpg +19864.jpg +25542.jpg +2910.jpg +9380.jpg +25060.jpg +1495.jpg +4473.jpg +23546.jpg +4330.jpg +1136.jpg +10127.jpg +9121.jpg +10222.jpg +6809.jpg +20514.jpg +8249.jpg +6993.jpg +24690.jpg +20126.jpg +9959.jpg +8535.jpg +14870.jpg +2229.jpg +28665.jpg +6584.jpg +21245.jpg +10187.jpg +10849.jpg +3003.jpg +8525.jpg +17348.jpg +22854.jpg +23339.jpg +18100.jpg +5236.jpg +10778.jpg +1612.jpg +22934.jpg +6774.jpg +22543.jpg +26871.jpg +10039.jpg +3006.jpg +21800.jpg +16042.jpg +5075.jpg +5085.jpg +16154.jpg +27792.jpg +16144.jpg +16988.jpg +982.jpg +15650.jpg +22245.jpg +8508.jpg +10660.jpg +24948.jpg +11027.jpg +19536.jpg +14691.jpg +1451.jpg +19037.jpg +22874.jpg +9382.jpg +15462.jpg +22289.jpg +24041.jpg +29317.jpg +23262.jpg +9283.jpg +5010.jpg +18673.jpg +11240.jpg 
+3979.jpg +25567.jpg +2805.jpg +29168.jpg +1855.jpg +10112.jpg +2696.jpg +17764.jpg +16310.jpg +16483.jpg +21250.jpg +1608.jpg +9927.jpg +7816.jpg +2472.jpg +27185.jpg +16496.jpg +7979.jpg +26481.jpg +14373.jpg +19832.jpg +18276.jpg +16141.jpg +27282.jpg +11816.jpg +26347.jpg +4392.jpg +25314.jpg +7778.jpg +13287.jpg +4828.jpg +24563.jpg +16949.jpg +6562.jpg +15009.jpg +29945.jpg +23734.jpg +8884.jpg +17485.jpg +2350.jpg +20909.jpg +12705.jpg +28876.jpg +26031.jpg +20162.jpg +9712.jpg +12208.jpg +27241.jpg +3435.jpg +25449.jpg +20268.jpg +25763.jpg +20932.jpg +23646.jpg +2282.jpg +25410.jpg +26331.jpg +22799.jpg +6750.jpg +29763.jpg +10289.jpg +2356.jpg +3859.jpg +21855.jpg +12005.jpg +20020.jpg +18271.jpg +7210.jpg +1611.jpg +8478.jpg +29876.jpg +26454.jpg +14276.jpg +11882.jpg +28856.jpg +13300.jpg +24488.jpg +10393.jpg +26904.jpg +1530.jpg +26261.jpg +16386.jpg +13912.jpg +88.jpg +4936.jpg +2200.jpg +12731.jpg +29551.jpg +21620.jpg +12190.jpg +8981.jpg +3082.jpg +20116.jpg +26661.jpg +26965.jpg +6163.jpg +19465.jpg +20573.jpg +5794.jpg +8941.jpg +6983.jpg +1998.jpg +14323.jpg +3765.jpg +25209.jpg +537.jpg +4438.jpg +21300.jpg +7648.jpg +9586.jpg +13053.jpg +6360.jpg +5924.jpg +11589.jpg +25342.jpg +7137.jpg +438.jpg +2307.jpg +15374.jpg +14974.jpg +5659.jpg +15689.jpg +21409.jpg +24053.jpg +3002.jpg +13470.jpg +7724.jpg +23135.jpg +26248.jpg +4577.jpg +16776.jpg +15553.jpg +9106.jpg +18035.jpg +8754.jpg +17930.jpg +4786.jpg +7628.jpg +8462.jpg +24128.jpg +21007.jpg +16227.jpg +3176.jpg +19179.jpg +18486.jpg +10372.jpg +27284.jpg +3608.jpg +1929.jpg +20581.jpg +226.jpg +26694.jpg +12227.jpg +1456.jpg +6899.jpg +20852.jpg +10305.jpg +34.jpg +11099.jpg +6416.jpg +898.jpg +26447.jpg +4527.jpg +19576.jpg +8650.jpg +24142.jpg +14646.jpg +3443.jpg +17662.jpg +16596.jpg +12869.jpg +14446.jpg +28746.jpg +24985.jpg +21758.jpg +21866.jpg +4962.jpg +11203.jpg +19816.jpg +4526.jpg +24765.jpg +11158.jpg +11301.jpg +2151.jpg +7090.jpg +19895.jpg +7863.jpg +4583.jpg +8778.jpg +3951.jpg +14455.jpg +22674.jpg +28871.jpg +22146.jpg +9508.jpg +29862.jpg +1076.jpg +5539.jpg +20398.jpg +15606.jpg +320.jpg +16939.jpg +12013.jpg +2253.jpg +24199.jpg +25889.jpg +20083.jpg +29519.jpg +2871.jpg +17202.jpg +17679.jpg +19464.jpg +13789.jpg +12494.jpg +4150.jpg +2347.jpg +827.jpg +6411.jpg +18985.jpg +13425.jpg +8290.jpg +25876.jpg +13321.jpg +2654.jpg +7636.jpg +18649.jpg +19674.jpg +29883.jpg +4650.jpg +8907.jpg +19443.jpg +19384.jpg +24769.jpg +27674.jpg +15164.jpg +9837.jpg +22135.jpg +16497.jpg +9107.jpg +27163.jpg +26695.jpg +14336.jpg +7610.jpg +29978.jpg +22068.jpg +15243.jpg +9221.jpg +13785.jpg +2609.jpg +23866.jpg +27663.jpg +17399.jpg +13714.jpg +218.jpg +5848.jpg +5040.jpg +26160.jpg +13042.jpg +26446.jpg +19853.jpg +4536.jpg +2933.jpg +5586.jpg +9565.jpg +8481.jpg +11729.jpg +9243.jpg +28018.jpg +13760.jpg +9158.jpg +3635.jpg +7891.jpg +3599.jpg +20336.jpg +25375.jpg +14937.jpg +23679.jpg +21858.jpg +3831.jpg +9395.jpg +3533.jpg +23491.jpg +6748.jpg +281.jpg +28937.jpg +17571.jpg +26023.jpg +6679.jpg +10155.jpg +11036.jpg +12434.jpg +1083.jpg +27113.jpg +28578.jpg +2677.jpg +12959.jpg +4048.jpg +28587.jpg +23740.jpg +17405.jpg +19860.jpg +29795.jpg +27883.jpg +23412.jpg +201.jpg +17089.jpg +10478.jpg +10490.jpg +11689.jpg +6201.jpg +16917.jpg +16790.jpg +15988.jpg +29259.jpg +9116.jpg +21213.jpg +13870.jpg +24505.jpg +28906.jpg +18811.jpg +26332.jpg +22144.jpg +15814.jpg +22087.jpg +11284.jpg +2919.jpg +6186.jpg +11594.jpg +3691.jpg +14201.jpg +13826.jpg +21289.jpg +8067.jpg +9962.jpg +28617.jpg 
+15324.jpg +2297.jpg +28195.jpg +12879.jpg +6677.jpg +2656.jpg +11071.jpg +25663.jpg +24454.jpg +10928.jpg +9507.jpg +15165.jpg +4956.jpg +3286.jpg +1572.jpg +13250.jpg +19800.jpg +11788.jpg +29443.jpg +20303.jpg +20277.jpg +14647.jpg +8705.jpg +26855.jpg +28074.jpg +13972.jpg +22793.jpg +11274.jpg +26582.jpg +28484.jpg +16855.jpg +4807.jpg +5765.jpg +14345.jpg +11085.jpg +27089.jpg +17120.jpg +483.jpg +20494.jpg +26701.jpg +6382.jpg +27263.jpg +3210.jpg +17129.jpg +28841.jpg +4151.jpg +13407.jpg +27485.jpg +13791.jpg +20220.jpg +4104.jpg +14805.jpg +8435.jpg +17751.jpg +3508.jpg +24913.jpg +4000.jpg +22458.jpg +29261.jpg +15806.jpg +23803.jpg +26501.jpg +27492.jpg +15170.jpg +14165.jpg +16813.jpg +8591.jpg +27945.jpg +4937.jpg +4405.jpg +19150.jpg +18754.jpg +3131.jpg +4846.jpg +8797.jpg +7207.jpg +9255.jpg +14752.jpg +4403.jpg +3848.jpg +18426.jpg +8755.jpg +15011.jpg +16148.jpg +29275.jpg +13565.jpg +16543.jpg +20187.jpg +15480.jpg +24407.jpg +29114.jpg +18592.jpg +26345.jpg +19336.jpg +10702.jpg +15234.jpg +20040.jpg +24133.jpg +9123.jpg +9298.jpg +17988.jpg +18639.jpg +17160.jpg +16535.jpg +2690.jpg +359.jpg +23198.jpg +18346.jpg +350.jpg +25357.jpg +27164.jpg +9470.jpg +565.jpg +23699.jpg +9816.jpg +9381.jpg +15765.jpg +7726.jpg +19686.jpg +5319.jpg +20084.jpg +21127.jpg +10756.jpg +8057.jpg +26690.jpg +2790.jpg +23945.jpg +863.jpg +21603.jpg +11663.jpg +28262.jpg +3805.jpg +21344.jpg +8913.jpg +16370.jpg +11437.jpg +23323.jpg +28482.jpg +3383.jpg +19236.jpg +19402.jpg +23129.jpg +24134.jpg +12812.jpg +23084.jpg +19835.jpg +25323.jpg +2959.jpg +25530.jpg +105.jpg +17195.jpg +14444.jpg +18479.jpg +6210.jpg +9078.jpg +16286.jpg +19004.jpg +12368.jpg +17604.jpg +15489.jpg +24006.jpg +16112.jpg +17358.jpg +15956.jpg +27563.jpg +6112.jpg +6501.jpg +9397.jpg +3192.jpg +11902.jpg +12601.jpg +14462.jpg +7617.jpg +13201.jpg +17419.jpg +14704.jpg +19258.jpg +8239.jpg +8548.jpg +9900.jpg +21155.jpg +20178.jpg +4269.jpg +10437.jpg +18737.jpg +19334.jpg +21347.jpg +6856.jpg +6531.jpg +21826.jpg +12569.jpg +12049.jpg +7604.jpg +16464.jpg +17938.jpg +20928.jpg +27004.jpg +16069.jpg +11936.jpg +21739.jpg +21522.jpg +13827.jpg +18862.jpg +1368.jpg +21112.jpg +10866.jpg +28335.jpg +21341.jpg +6911.jpg +26338.jpg +26089.jpg +402.jpg +14429.jpg +22244.jpg +11687.jpg +24066.jpg +7140.jpg +25823.jpg +13081.jpg +16793.jpg +1505.jpg +13853.jpg +8102.jpg +10471.jpg +29350.jpg +9752.jpg +4574.jpg +19337.jpg +8419.jpg +22157.jpg +13051.jpg +11584.jpg +13440.jpg +2562.jpg +3912.jpg +5083.jpg +16979.jpg +7250.jpg +28766.jpg +18236.jpg +16308.jpg +2860.jpg +1308.jpg +15756.jpg +20088.jpg +24392.jpg +10025.jpg +4417.jpg +6479.jpg +12435.jpg +26339.jpg +10818.jpg +7105.jpg +15471.jpg +2998.jpg +29495.jpg +29882.jpg +7246.jpg +26944.jpg +8076.jpg +26013.jpg +23066.jpg +21438.jpg +17063.jpg +21302.jpg +27862.jpg +8879.jpg +9694.jpg +3032.jpg +9376.jpg +17299.jpg +16585.jpg +5290.jpg +17554.jpg +7681.jpg +22132.jpg +17508.jpg +12198.jpg +2961.jpg +14127.jpg +5550.jpg +25681.jpg +24537.jpg +9337.jpg +8873.jpg +24181.jpg +10278.jpg +10708.jpg +23337.jpg +8294.jpg +13856.jpg +13933.jpg +10676.jpg +5269.jpg +22327.jpg +10418.jpg +27807.jpg +24550.jpg +3578.jpg +18194.jpg +15153.jpg +8326.jpg +19897.jpg +7399.jpg +20052.jpg +24713.jpg +7211.jpg +7765.jpg +28868.jpg +23954.jpg +4129.jpg +25543.jpg +3478.jpg +11120.jpg +8324.jpg +25131.jpg +4511.jpg +21257.jpg +16462.jpg +10971.jpg +5751.jpg +27187.jpg +27884.jpg +16538.jpg +12924.jpg +24494.jpg +21888.jpg +16478.jpg +29598.jpg +17819.jpg +12455.jpg +7699.jpg +6428.jpg 
+17505.jpg +23372.jpg +24556.jpg +5849.jpg +26041.jpg +5394.jpg +17258.jpg +24253.jpg +9098.jpg +8006.jpg +22401.jpg +7985.jpg +25887.jpg +14735.jpg +9292.jpg +9167.jpg +26233.jpg +1632.jpg +26586.jpg +18560.jpg +7057.jpg +423.jpg +11483.jpg +19424.jpg +5971.jpg +2866.jpg +7598.jpg +4697.jpg +27805.jpg +25547.jpg +10454.jpg +9327.jpg +11966.jpg +20734.jpg +20458.jpg +3309.jpg +16282.jpg +9493.jpg +28472.jpg +1430.jpg +22475.jpg +21258.jpg +25877.jpg +27651.jpg +3880.jpg +24378.jpg +10126.jpg +2034.jpg +6644.jpg +12257.jpg +12788.jpg +14341.jpg +20775.jpg +24249.jpg +28506.jpg +23284.jpg +6088.jpg +19914.jpg +29914.jpg +6185.jpg +4675.jpg +23713.jpg +25470.jpg +14669.jpg +15627.jpg +4063.jpg +15578.jpg +4472.jpg +2294.jpg +22217.jpg +16901.jpg +25420.jpg +22889.jpg +4635.jpg +7409.jpg +6483.jpg +618.jpg +1331.jpg +21132.jpg +9108.jpg +13012.jpg +4163.jpg +17515.jpg +27330.jpg +25842.jpg +18909.jpg +18225.jpg +22430.jpg +16198.jpg +5896.jpg +28000.jpg +13214.jpg +6906.jpg +14257.jpg +22811.jpg +12985.jpg +6759.jpg +13564.jpg +2986.jpg +24022.jpg +58.jpg +25181.jpg +21664.jpg +20553.jpg +25332.jpg +28374.jpg +14659.jpg +15943.jpg +7070.jpg +8836.jpg +4464.jpg +1209.jpg +25950.jpg +25708.jpg +17500.jpg +1543.jpg +23783.jpg +1563.jpg +14741.jpg +24601.jpg +8805.jpg +12927.jpg +1042.jpg +8335.jpg +14570.jpg +16338.jpg +25414.jpg +5030.jpg +16967.jpg +4616.jpg +23258.jpg +7707.jpg +21950.jpg +8488.jpg +6484.jpg +6436.jpg +27146.jpg +27939.jpg +27081.jpg +27496.jpg +18554.jpg +8007.jpg +23283.jpg +9644.jpg +10662.jpg +17053.jpg +22265.jpg +27744.jpg +5670.jpg +18646.jpg +16466.jpg +4736.jpg +22657.jpg +12351.jpg +22222.jpg +23670.jpg +8117.jpg +15417.jpg +511.jpg +4387.jpg +20539.jpg +9424.jpg +6340.jpg +14321.jpg +5852.jpg +6768.jpg +11300.jpg +16418.jpg +1411.jpg +12357.jpg +20306.jpg +28292.jpg +14705.jpg +3922.jpg +28411.jpg +18396.jpg +24357.jpg +8889.jpg +6933.jpg +9260.jpg +27297.jpg +23234.jpg +25406.jpg +14726.jpg +7152.jpg +17103.jpg +2041.jpg +6213.jpg +26356.jpg +21831.jpg +1337.jpg +11669.jpg +12229.jpg +12096.jpg +16434.jpg +21144.jpg +7679.jpg +15807.jpg +13043.jpg +20856.jpg +18393.jpg +23720.jpg +19290.jpg +16475.jpg +5031.jpg +11599.jpg +23609.jpg +27774.jpg +2249.jpg +29452.jpg +3415.jpg +17085.jpg +8706.jpg +4812.jpg +19065.jpg +25826.jpg +25306.jpg +27338.jpg +15646.jpg +3682.jpg +16107.jpg +10794.jpg +29241.jpg +14561.jpg +14436.jpg +24978.jpg +7531.jpg +20392.jpg +28304.jpg +29628.jpg +8626.jpg +7071.jpg +16105.jpg +27056.jpg +3242.jpg +25996.jpg +21555.jpg +24010.jpg +4863.jpg +6839.jpg +14882.jpg +8685.jpg +5228.jpg +21542.jpg +13148.jpg +7467.jpg +20762.jpg +18860.jpg +10301.jpg +5790.jpg +13286.jpg +27404.jpg +26784.jpg +19516.jpg +17723.jpg +10764.jpg +15544.jpg +6826.jpg +16153.jpg +8215.jpg +17545.jpg +23777.jpg +29927.jpg +20958.jpg +23433.jpg +26479.jpg +1801.jpg +28198.jpg +17897.jpg +29117.jpg +8771.jpg +6377.jpg +8984.jpg +17594.jpg +25482.jpg +9671.jpg +707.jpg +29367.jpg +8707.jpg +3882.jpg +12798.jpg +16261.jpg +29439.jpg +15927.jpg +1433.jpg +6867.jpg +5880.jpg +13039.jpg +3144.jpg +7103.jpg +400.jpg +7348.jpg +22336.jpg +25203.jpg +23955.jpg +13541.jpg +28380.jpg +2418.jpg +29078.jpg +23997.jpg +9322.jpg +7746.jpg +29167.jpg +2605.jpg +25491.jpg +14407.jpg +17598.jpg +12255.jpg +8376.jpg +10718.jpg +23300.jpg +14222.jpg +25425.jpg +20370.jpg +13896.jpg +25430.jpg +7542.jpg +28512.jpg +14332.jpg +14806.jpg +936.jpg +8939.jpg +10018.jpg +26394.jpg +5318.jpg +28115.jpg +24175.jpg +29201.jpg +13980.jpg +28956.jpg +2651.jpg +5367.jpg +11163.jpg +12571.jpg 
+4374.jpg +26099.jpg +1784.jpg +29528.jpg +25633.jpg +4049.jpg +29747.jpg +726.jpg +13162.jpg +20510.jpg +11521.jpg +3035.jpg +16508.jpg +8298.jpg +19463.jpg +8333.jpg +21476.jpg +4831.jpg +5108.jpg +20971.jpg +28860.jpg +20238.jpg +7358.jpg +26237.jpg +13818.jpg +13446.jpg +15232.jpg +4998.jpg +25197.jpg +18742.jpg +3406.jpg +27655.jpg +28009.jpg +6961.jpg +2742.jpg +14787.jpg +28998.jpg +29991.jpg +25983.jpg +22438.jpg +8195.jpg +22647.jpg +25561.jpg +8003.jpg +8431.jpg +12122.jpg +28622.jpg +6269.jpg +26906.jpg +18495.jpg +10869.jpg +19058.jpg +3227.jpg +28964.jpg +29174.jpg +2428.jpg +7302.jpg +29462.jpg +249.jpg +8747.jpg +10945.jpg +6654.jpg +27849.jpg +10965.jpg +11862.jpg +24262.jpg +1742.jpg +10121.jpg +6766.jpg +16721.jpg +5879.jpg +4445.jpg +27634.jpg +26324.jpg +11673.jpg +20979.jpg +23686.jpg +5657.jpg +112.jpg +22292.jpg +7554.jpg +2872.jpg +29918.jpg +20351.jpg +1979.jpg +13561.jpg +2580.jpg +13752.jpg +24169.jpg +2340.jpg +6392.jpg +22978.jpg +1839.jpg +1597.jpg +27196.jpg +25755.jpg +26858.jpg +16389.jpg +12664.jpg +28357.jpg +3906.jpg +22808.jpg +6729.jpg +27708.jpg +20973.jpg +11343.jpg +14966.jpg +15277.jpg +26589.jpg +21655.jpg +11289.jpg +14265.jpg +18669.jpg +2214.jpg +16868.jpg +28879.jpg +16120.jpg +5813.jpg +10936.jpg +15487.jpg +26293.jpg +29543.jpg +16913.jpg +5184.jpg +18572.jpg +8832.jpg +15623.jpg +24373.jpg +7248.jpg +16970.jpg +21846.jpg +19988.jpg +19607.jpg +1094.jpg +542.jpg +25229.jpg +19838.jpg +17716.jpg +2917.jpg +21032.jpg +1778.jpg +23295.jpg +6730.jpg +22113.jpg +6240.jpg +29754.jpg +15189.jpg +16992.jpg +5463.jpg +19554.jpg +14891.jpg +21595.jpg +8390.jpg +15301.jpg +569.jpg +15269.jpg +26058.jpg +23775.jpg +21859.jpg +13729.jpg +2211.jpg +7402.jpg +12903.jpg +29038.jpg +24885.jpg +17963.jpg +20849.jpg +15770.jpg +11096.jpg +25724.jpg +157.jpg +18910.jpg +10151.jpg +27896.jpg +9232.jpg +5241.jpg +15208.jpg +4165.jpg +25641.jpg +6683.jpg +1939.jpg +5999.jpg +14172.jpg +8463.jpg +19615.jpg +24425.jpg +10327.jpg +18000.jpg +9505.jpg +7788.jpg +6058.jpg +17390.jpg +29124.jpg +16872.jpg +6504.jpg +11134.jpg +23595.jpg +29.jpg +19315.jpg +19983.jpg +13443.jpg +27174.jpg +13323.jpg +595.jpg +21709.jpg +17792.jpg +22865.jpg +29248.jpg +2196.jpg +3791.jpg +26189.jpg +11349.jpg +12267.jpg +2281.jpg +15558.jpg +23767.jpg +8404.jpg +26218.jpg +9471.jpg +7175.jpg +26607.jpg +7500.jpg +9842.jpg +23569.jpg +5301.jpg +21294.jpg +17283.jpg +26051.jpg +2882.jpg +21283.jpg +11913.jpg +4539.jpg +16279.jpg +13747.jpg +8838.jpg +772.jpg +5982.jpg +23308.jpg +25897.jpg +12883.jpg +13692.jpg +9088.jpg +3438.jpg +29692.jpg +646.jpg +29382.jpg +731.jpg +13930.jpg +9497.jpg +451.jpg +8252.jpg +533.jpg +12976.jpg +24636.jpg +4580.jpg +20433.jpg +22852.jpg +15902.jpg +8785.jpg +22558.jpg +10897.jpg +8585.jpg +8319.jpg +16807.jpg +3355.jpg +18845.jpg +23879.jpg +9043.jpg +16355.jpg +21460.jpg +504.jpg +21878.jpg +9532.jpg +14506.jpg +1816.jpg +21023.jpg +18743.jpg +24621.jpg +23250.jpg +404.jpg +17620.jpg +19109.jpg +22167.jpg +16904.jpg +22264.jpg +5479.jpg +20653.jpg +21975.jpg +11923.jpg +604.jpg +1504.jpg +29950.jpg +6437.jpg +17544.jpg +11760.jpg +11459.jpg +18128.jpg +11474.jpg +5950.jpg +6219.jpg +16737.jpg +10525.jpg +25339.jpg +14402.jpg +10465.jpg +5579.jpg +3115.jpg +16916.jpg +16852.jpg +26219.jpg +1573.jpg +8571.jpg +19547.jpg +26173.jpg +2043.jpg +25618.jpg +1851.jpg +16222.jpg +959.jpg +2776.jpg +26782.jpg +27408.jpg +9110.jpg +8623.jpg +19300.jpg +26702.jpg +27581.jpg +7832.jpg +3896.jpg +9778.jpg +27809.jpg +1990.jpg +8829.jpg +874.jpg +6726.jpg +16121.jpg 
+4416.jpg +777.jpg +6720.jpg +10887.jpg +22705.jpg +1542.jpg +3233.jpg +12044.jpg +19567.jpg +22633.jpg +9710.jpg +11174.jpg +20730.jpg +23318.jpg +16951.jpg +26852.jpg +9352.jpg +12395.jpg +1715.jpg +20401.jpg +29383.jpg +6762.jpg +28943.jpg +18253.jpg +14065.jpg +15987.jpg +20112.jpg +1967.jpg +23970.jpg +6049.jpg +15791.jpg +3274.jpg +18691.jpg +12679.jpg +13682.jpg +28366.jpg +25398.jpg +11262.jpg +25839.jpg +22667.jpg +13108.jpg +18115.jpg +20119.jpg +22009.jpg +20373.jpg +29426.jpg +4032.jpg +16762.jpg +29853.jpg +10594.jpg +27072.jpg +11296.jpg +12412.jpg +15595.jpg +24813.jpg +29993.jpg +10.jpg +21167.jpg +10382.jpg +14731.jpg +9958.jpg +27626.jpg +28261.jpg +29854.jpg +29433.jpg +29488.jpg +4174.jpg +14547.jpg +12128.jpg +14359.jpg +4518.jpg +4420.jpg +3802.jpg +24524.jpg +7443.jpg +7048.jpg +15885.jpg +18186.jpg +19529.jpg +4432.jpg +21970.jpg +23166.jpg +22503.jpg +12626.jpg +19304.jpg +3895.jpg +19496.jpg +23189.jpg +19623.jpg +13062.jpg +14042.jpg +11696.jpg +6401.jpg +6689.jpg +26462.jpg +1602.jpg +349.jpg +23056.jpg +9188.jpg +1259.jpg +27743.jpg +14533.jpg +23257.jpg +21.jpg +21560.jpg +4379.jpg +351.jpg +10643.jpg +26626.jpg +22451.jpg +26715.jpg +26167.jpg +18460.jpg +3278.jpg +4889.jpg +23431.jpg +29242.jpg +17101.jpg +5223.jpg +3884.jpg +19655.jpg +829.jpg +12371.jpg +11876.jpg +3250.jpg +1357.jpg +20541.jpg +6637.jpg +6489.jpg +20722.jpg +7956.jpg +3849.jpg +22417.jpg +19625.jpg +22575.jpg +6047.jpg +1213.jpg +21368.jpg +14953.jpg +12099.jpg +11532.jpg +16665.jpg +18242.jpg +13761.jpg +21019.jpg +22231.jpg +10211.jpg +19982.jpg +10741.jpg +6625.jpg +23880.jpg +19223.jpg +21967.jpg +4327.jpg +1527.jpg +16124.jpg +15910.jpg +15342.jpg +16938.jpg +8269.jpg +14709.jpg +7791.jpg +15260.jpg +21323.jpg +3822.jpg +8688.jpg +7936.jpg +1835.jpg +25035.jpg +24628.jpg +27363.jpg +21849.jpg +377.jpg +24502.jpg +14537.jpg +18092.jpg +2071.jpg +7713.jpg +940.jpg +26436.jpg +4381.jpg +4605.jpg +25660.jpg +16751.jpg +23322.jpg +21869.jpg +19946.jpg +6585.jpg +27683.jpg +24527.jpg +3576.jpg +28843.jpg +29736.jpg +20046.jpg +4890.jpg +7551.jpg +6349.jpg +10621.jpg +1988.jpg +2319.jpg +9428.jpg +1180.jpg +18110.jpg +9886.jpg +8902.jpg +9358.jpg +9190.jpg +22274.jpg +5055.jpg +27534.jpg +14015.jpg +10137.jpg +6212.jpg +24371.jpg +19630.jpg +11130.jpg +2210.jpg +12427.jpg +24752.jpg +26188.jpg +24790.jpg +2746.jpg +502.jpg +26993.jpg +6039.jpg +25120.jpg +5681.jpg +7846.jpg +22921.jpg +29163.jpg +29273.jpg +23597.jpg +29479.jpg +18280.jpg +18951.jpg +20724.jpg +4729.jpg +16994.jpg +13229.jpg +3501.jpg +11865.jpg +14967.jpg +8642.jpg +10054.jpg +17298.jpg +24586.jpg +8788.jpg +23768.jpg +17245.jpg +10300.jpg +5081.jpg +3674.jpg +18885.jpg +14677.jpg +10569.jpg +25178.jpg +3815.jpg +25697.jpg +21430.jpg +21267.jpg +13002.jpg +26250.jpg +2891.jpg +7594.jpg +652.jpg +16768.jpg +9258.jpg +22286.jpg +4006.jpg +5614.jpg +18148.jpg +4391.jpg +28061.jpg +7727.jpg +12375.jpg +12926.jpg +4705.jpg +4811.jpg +28851.jpg +26848.jpg +18551.jpg +19976.jpg +1815.jpg +17314.jpg +5233.jpg +2531.jpg +15182.jpg +541.jpg +868.jpg +3752.jpg +28796.jpg +25972.jpg +19360.jpg +10006.jpg +19188.jpg +20791.jpg +23888.jpg +18370.jpg +7983.jpg +26391.jpg +11382.jpg +18415.jpg +13603.jpg +13403.jpg +1552.jpg +14367.jpg +10246.jpg +2439.jpg +23770.jpg +20540.jpg +21195.jpg +11666.jpg +6790.jpg +18926.jpg +2305.jpg +1222.jpg +7787.jpg +29935.jpg +24916.jpg +8477.jpg +12528.jpg +871.jpg +18408.jpg +19735.jpg +22576.jpg +8624.jpg +8719.jpg +1690.jpg +12752.jpg +11661.jpg +22091.jpg +20385.jpg +26380.jpg +17139.jpg +10510.jpg 
+5646.jpg +17349.jpg +7309.jpg +13197.jpg +5009.jpg +26402.jpg +28267.jpg +3823.jpg +14686.jpg +18279.jpg +24445.jpg +4026.jpg +23687.jpg +27816.jpg +7859.jpg +16018.jpg +3876.jpg +23746.jpg +9700.jpg +5577.jpg +22840.jpg +9770.jpg +19764.jpg +10584.jpg +5899.jpg +17773.jpg +383.jpg +13103.jpg +19772.jpg +23145.jpg +7646.jpg +25008.jpg +6469.jpg +6817.jpg +24106.jpg +11880.jpg +26528.jpg +24125.jpg +1953.jpg +16781.jpg +26985.jpg +7133.jpg +6676.jpg +22083.jpg +8023.jpg +15540.jpg +6299.jpg +21731.jpg +23876.jpg +813.jpg +14888.jpg +22253.jpg +4582.jpg +12782.jpg +12296.jpg +18761.jpg +29653.jpg +14080.jpg +11918.jpg +10909.jpg +26239.jpg +5983.jpg +24947.jpg +26550.jpg +15078.jpg +5152.jpg +7860.jpg +89.jpg +18022.jpg +16327.jpg +16070.jpg +3776.jpg +20446.jpg +28735.jpg +26280.jpg +24272.jpg +25006.jpg +8052.jpg +27032.jpg +28633.jpg +21885.jpg +15698.jpg +19756.jpg +10977.jpg +22038.jpg +10925.jpg +9524.jpg +9227.jpg +25616.jpg +14391.jpg +28233.jpg +10324.jpg +6145.jpg +15953.jpg +23026.jpg +11656.jpg +29144.jpg +3865.jpg +3886.jpg +14748.jpg +17008.jpg +13573.jpg +26133.jpg +9171.jpg +2184.jpg +14970.jpg +7455.jpg +18255.jpg +2938.jpg +23534.jpg +27605.jpg +6303.jpg +6404.jpg +5368.jpg +21945.jpg +6619.jpg +24119.jpg +18476.jpg +14627.jpg +28159.jpg +21394.jpg +7451.jpg +10028.jpg +9919.jpg +14200.jpg +28748.jpg +13941.jpg +7468.jpg +13925.jpg +20647.jpg +12769.jpg +5573.jpg +20100.jpg +22570.jpg +15832.jpg +20663.jpg +20016.jpg +3014.jpg +6426.jpg +19430.jpg +19453.jpg +29527.jpg +29119.jpg +29197.jpg +20646.jpg +3793.jpg +9976.jpg +15679.jpg +25165.jpg +14802.jpg +2321.jpg +24706.jpg +699.jpg +26165.jpg +11398.jpg +332.jpg +28316.jpg +5636.jpg +26549.jpg +556.jpg +7854.jpg +19124.jpg +18872.jpg +3318.jpg +12833.jpg +25628.jpg +4558.jpg +2944.jpg +22721.jpg +21814.jpg +29886.jpg +14932.jpg +7109.jpg +1723.jpg +23422.jpg +25581.jpg +25401.jpg +14883.jpg +10990.jpg +6578.jpg +10316.jpg +22904.jpg +18792.jpg +20625.jpg +23711.jpg +4887.jpg +14223.jpg +15613.jpg +524.jpg +25813.jpg +2182.jpg +12609.jpg +23766.jpg +29780.jpg +27658.jpg +9208.jpg +23898.jpg +26245.jpg +11067.jpg +27673.jpg +17724.jpg +14603.jpg +15445.jpg +26334.jpg +16043.jpg +11137.jpg +21297.jpg +18189.jpg +9774.jpg +2719.jpg +27133.jpg +16517.jpg +9202.jpg +10291.jpg +13441.jpg +7621.jpg +23176.jpg +23274.jpg +22246.jpg +22736.jpg +12797.jpg +9199.jpg +6638.jpg +22584.jpg +7952.jpg +12629.jpg +26196.jpg +644.jpg +14649.jpg +20700.jpg +16037.jpg +19726.jpg +394.jpg +27080.jpg +4339.jpg +24639.jpg +2056.jpg +9458.jpg +22621.jpg +1905.jpg +9492.jpg +6205.jpg +27668.jpg +25268.jpg +15341.jpg +1663.jpg +24320.jpg +11940.jpg +15390.jpg +20815.jpg +9180.jpg +608.jpg +16572.jpg +13821.jpg +15396.jpg +5588.jpg +27837.jpg +20846.jpg +5139.jpg +26080.jpg +27471.jpg +11304.jpg +6706.jpg +16114.jpg +8414.jpg +17412.jpg +5186.jpg +29493.jpg +8421.jpg +16265.jpg +22817.jpg +19970.jpg +21694.jpg +5844.jpg +16292.jpg +22149.jpg +8368.jpg +11237.jpg +27234.jpg +22795.jpg +6664.jpg +24530.jpg +27965.jpg +7739.jpg +20875.jpg +26737.jpg +14672.jpg +13993.jpg +1174.jpg +9021.jpg +23538.jpg +11336.jpg +4027.jpg +1262.jpg +8323.jpg +23921.jpg +2405.jpg +1166.jpg +27666.jpg +21786.jpg +5213.jpg +15787.jpg +26285.jpg +17432.jpg +23363.jpg +22197.jpg +491.jpg +122.jpg +8562.jpg +14586.jpg +27365.jpg +24966.jpg +25183.jpg +13869.jpg +11892.jpg +19886.jpg +216.jpg +29952.jpg +6208.jpg +2045.jpg +17043.jpg +18060.jpg +27235.jpg +21025.jpg +28867.jpg +6656.jpg +26156.jpg +2470.jpg +26077.jpg +11332.jpg +2484.jpg +7356.jpg +29057.jpg +25610.jpg 
+2230.jpg +24031.jpg +12454.jpg +12077.jpg +16799.jpg +21586.jpg +18012.jpg +11212.jpg +24052.jpg +8720.jpg +2737.jpg +7773.jpg +16987.jpg +14545.jpg +2504.jpg +2786.jpg +11407.jpg +11031.jpg +29941.jpg +28450.jpg +4428.jpg +14835.jpg +5455.jpg +27861.jpg +28027.jpg +10906.jpg +22512.jpg +2235.jpg +23419.jpg +22364.jpg +7947.jpg +23062.jpg +11974.jpg +25961.jpg +8844.jpg +21064.jpg +12920.jpg +25215.jpg +824.jpg +22979.jpg +14178.jpg +9251.jpg +11724.jpg +16230.jpg +4086.jpg +12040.jpg +19883.jpg +6536.jpg +21359.jpg +9132.jpg +20800.jpg +27983.jpg +5308.jpg +6425.jpg +14198.jpg +10385.jpg +12431.jpg +27001.jpg +20452.jpg +8581.jpg +17987.jpg +20010.jpg +22114.jpg +2966.jpg +26017.jpg +26389.jpg +10730.jpg +25149.jpg +639.jpg +15406.jpg +1617.jpg +9673.jpg +24360.jpg +17536.jpg +14949.jpg +15375.jpg +7851.jpg +20399.jpg +21769.jpg +15913.jpg +7318.jpg +20496.jpg +10856.jpg +9104.jpg +18868.jpg +28896.jpg +24349.jpg +24383.jpg +12757.jpg +7545.jpg +10430.jpg +4683.jpg +18500.jpg +23192.jpg +29992.jpg +2604.jpg +9365.jpg +13799.jpg +22501.jpg +26686.jpg +19848.jpg +2676.jpg +12932.jpg +21313.jpg +26328.jpg +24762.jpg +11615.jpg +15412.jpg +27027.jpg +353.jpg +4724.jpg +17563.jpg +26992.jpg +16178.jpg +22903.jpg +27189.jpg +6476.jpg +1301.jpg +10235.jpg +18182.jpg +26590.jpg +17107.jpg +18467.jpg +7255.jpg +18312.jpg +7653.jpg +20717.jpg +5551.jpg +12572.jpg +23820.jpg +6617.jpg +24898.jpg +6287.jpg +24908.jpg +17532.jpg +5804.jpg +2317.jpg +19782.jpg +2662.jpg +10477.jpg +27749.jpg +27648.jpg +23181.jpg +9643.jpg +22696.jpg +20918.jpg +2570.jpg +5067.jpg +24101.jpg +26116.jpg +27239.jpg +10796.jpg +4132.jpg +9620.jpg +24005.jpg +8718.jpg +24992.jpg +8722.jpg +19146.jpg +29579.jpg +13210.jpg +15724.jpg +21011.jpg +12690.jpg +25037.jpg +13223.jpg +18474.jpg +8565.jpg +2468.jpg +1724.jpg +5421.jpg +1983.jpg +8438.jpg +5156.jpg +14285.jpg +5433.jpg +9956.jpg +24568.jpg +24416.jpg +4552.jpg +11003.jpg +25428.jpg +8652.jpg +27889.jpg +25577.jpg +28811.jpg +15473.jpg +8639.jpg +6063.jpg +7213.jpg +24475.jpg +16845.jpg +15112.jpg +29386.jpg +26108.jpg +17512.jpg +3108.jpg +25480.jpg +25639.jpg +7933.jpg +13643.jpg +22066.jpg +26824.jpg +25200.jpg +26743.jpg +18623.jpg +26223.jpg +12683.jpg +27931.jpg +10400.jpg +27461.jpg +8792.jpg +3432.jpg +20431.jpg +2844.jpg +17673.jpg +15653.jpg +2456.jpg +21358.jpg +24060.jpg +21216.jpg +22640.jpg +8909.jpg +3840.jpg +3163.jpg +4732.jpg +14995.jpg +14492.jpg +4494.jpg +4051.jpg +15594.jpg +20171.jpg +26869.jpg +13562.jpg +7924.jpg +23067.jpg +20047.jpg +27250.jpg +13313.jpg +15443.jpg +28683.jpg +4568.jpg +29450.jpg +7752.jpg +14623.jpg +10485.jpg +24271.jpg +13797.jpg +27386.jpg +29080.jpg +6556.jpg +27715.jpg +25487.jpg +14727.jpg +16941.jpg +26029.jpg +26805.jpg +16224.jpg +3204.jpg +24048.jpg +12024.jpg +20640.jpg +830.jpg +10067.jpg +20483.jpg +5066.jpg +23214.jpg +11605.jpg +8952.jpg +850.jpg +25496.jpg +13518.jpg +8242.jpg +3996.jpg +6000.jpg +18977.jpg +17867.jpg +16426.jpg +21691.jpg +11458.jpg +8373.jpg +12251.jpg +15160.jpg +11275.jpg +8068.jpg +7020.jpg +27700.jpg +13026.jpg +22612.jpg +10043.jpg +20407.jpg +2800.jpg +23023.jpg +10942.jpg +717.jpg +12523.jpg +952.jpg +5098.jpg +8753.jpg +29544.jpg +10469.jpg +2452.jpg +21591.jpg +5931.jpg +1156.jpg +3467.jpg +23454.jpg +6496.jpg +520.jpg +1247.jpg +2534.jpg +6379.jpg +656.jpg +19226.jpg +26174.jpg +28035.jpg +3971.jpg +15210.jpg +15483.jpg +20090.jpg +7703.jpg +17044.jpg +11073.jpg +12194.jpg +6996.jpg +10793.jpg +22158.jpg +19971.jpg +4310.jpg +12017.jpg +18723.jpg +744.jpg +8672.jpg +3376.jpg 
+15711.jpg +17199.jpg +21809.jpg +4681.jpg +22598.jpg +28230.jpg +22669.jpg +5178.jpg +11263.jpg +5676.jpg +6668.jpg +14387.jpg +11451.jpg +1461.jpg +7968.jpg +17408.jpg +14951.jpg +16232.jpg +29742.jpg +18116.jpg +9308.jpg +11365.jpg +17488.jpg +10340.jpg +22252.jpg +19019.jpg +11738.jpg +26047.jpg +28349.jpg +5700.jpg +29606.jpg +12256.jpg +19393.jpg +16749.jpg +23088.jpg +6394.jpg +18598.jpg +12419.jpg +12719.jpg +29040.jpg +28649.jpg +8910.jpg +14906.jpg +4563.jpg +9763.jpg +29162.jpg +19808.jpg +29929.jpg +622.jpg +14277.jpg +11574.jpg +23241.jpg +3992.jpg +13119.jpg +9964.jpg +18127.jpg +23075.jpg +29858.jpg +2799.jpg +11227.jpg +18156.jpg +2699.jpg +29921.jpg +8468.jpg +15915.jpg +19213.jpg +23580.jpg +1427.jpg +25478.jpg +429.jpg +10684.jpg +7490.jpg +25750.jpg +16324.jpg +9582.jpg +19950.jpg +20216.jpg +11909.jpg +10864.jpg +20408.jpg +21911.jpg +25463.jpg +24749.jpg +19014.jpg +16957.jpg +5177.jpg +24422.jpg +2999.jpg +9947.jpg +6493.jpg +25076.jpg +16847.jpg +1692.jpg +28802.jpg +15503.jpg +19320.jpg +5629.jpg +17822.jpg +11947.jpg +1165.jpg +6588.jpg +5189.jpg +5728.jpg +23012.jpg +25762.jpg +20779.jpg +15114.jpg +9333.jpg +13058.jpg +21470.jpg +8746.jpg +18094.jpg +13763.jpg +19046.jpg +20048.jpg +25444.jpg +22082.jpg +27100.jpg +27735.jpg +12984.jpg +19822.jpg +26046.jpg +26274.jpg +21062.jpg +29006.jpg +176.jpg +12997.jpg +24492.jpg +15120.jpg +28631.jpg +4551.jpg +4885.jpg +13647.jpg +27074.jpg +1646.jpg +29970.jpg +18971.jpg +24328.jpg +23148.jpg +4903.jpg +18777.jpg +22805.jpg +17552.jpg +1645.jpg +20744.jpg +1351.jpg +12462.jpg +1387.jpg +27041.jpg +2873.jpg +23450.jpg +26398.jpg +23044.jpg +966.jpg +21597.jpg +25958.jpg +2649.jpg +28621.jpg +27303.jpg +18881.jpg +27516.jpg +7918.jpg +8209.jpg +5497.jpg +8480.jpg +15705.jpg +7045.jpg +21373.jpg +17385.jpg +12533.jpg +17346.jpg +21880.jpg +9340.jpg +29735.jpg +18133.jpg +7605.jpg +13090.jpg +17136.jpg +26084.jpg +19634.jpg +12881.jpg +16071.jpg +4544.jpg +18629.jpg +4658.jpg +13420.jpg +9270.jpg +13211.jpg +17396.jpg +26278.jpg +14521.jpg +26707.jpg +5797.jpg +14846.jpg +26272.jpg +6627.jpg +20495.jpg +10999.jpg +12069.jpg +2812.jpg +6474.jpg +5661.jpg +3574.jpg +21022.jpg +17562.jpg +10428.jpg +24957.jpg +19703.jpg +3220.jpg +17637.jpg +2424.jpg +7054.jpg +23520.jpg +22443.jpg +5644.jpg +12290.jpg +13004.jpg +2646.jpg +29356.jpg +11363.jpg +23314.jpg +15268.jpg +17809.jpg +12154.jpg +1093.jpg +24572.jpg +23040.jpg +6767.jpg +21020.jpg +16328.jpg +17341.jpg +7548.jpg +8475.jpg +19041.jpg +3561.jpg +16368.jpg +7685.jpg +8922.jpg +22885.jpg +6567.jpg +21392.jpg +9363.jpg +16325.jpg +15082.jpg +23592.jpg +29240.jpg +2968.jpg +9531.jpg +8820.jpg +771.jpg +6577.jpg +10360.jpg +15879.jpg +10522.jpg +14952.jpg +6006.jpg +12915.jpg +859.jpg +26499.jpg +13309.jpg +21006.jpg +19414.jpg +15852.jpg +13085.jpg +11778.jpg +12139.jpg +22023.jpg +10434.jpg +17521.jpg +7421.jpg +1200.jpg +25962.jpg +23650.jpg +12828.jpg +16579.jpg +10109.jpg +16413.jpg +26897.jpg +9417.jpg +27273.jpg +28055.jpg +19736.jpg +28510.jpg +13666.jpg +15130.jpg +14697.jpg +15349.jpg +23393.jpg +17922.jpg +18252.jpg +29937.jpg +1641.jpg +24499.jpg +29218.jpg +21924.jpg +26266.jpg +17372.jpg +20000.jpg +13928.jpg +17353.jpg +25374.jpg +5329.jpg +13939.jpg +25971.jpg +18767.jpg +11320.jpg +11090.jpg +20278.jpg +14720.jpg +9022.jpg +22560.jpg +16401.jpg +6206.jpg +19564.jpg +23861.jpg +26584.jpg +12946.jpg +27935.jpg +18672.jpg +2199.jpg +13966.jpg +21082.jpg +8533.jpg +19436.jpg +11864.jpg +15002.jpg +8037.jpg +21766.jpg +29725.jpg +8211.jpg +25526.jpg +29504.jpg 
+18601.jpg +27572.jpg +9433.jpg +27740.jpg +13952.jpg +13971.jpg +23778.jpg +68.jpg +17197.jpg +22418.jpg +19119.jpg +277.jpg +5188.jpg +24838.jpg +7852.jpg +21307.jpg +1507.jpg +4242.jpg +10599.jpg +26898.jpg +10125.jpg +946.jpg +1578.jpg +12366.jpg +11221.jpg +16610.jpg +17640.jpg +22133.jpg +26087.jpg +13844.jpg +11030.jpg +13145.jpg +18995.jpg +16035.jpg +15555.jpg +22206.jpg +15741.jpg +5161.jpg +11967.jpg +23449.jpg +1686.jpg +5718.jpg +23536.jpg +19462.jpg +22456.jpg +943.jpg +25255.jpg +25527.jpg +7457.jpg +4291.jpg +4411.jpg +20227.jpg +29884.jpg +27472.jpg +9474.jpg +28709.jpg +15414.jpg +22603.jpg +3810.jpg +5976.jpg +8026.jpg +9737.jpg +12109.jpg +13478.jpg +17489.jpg +10217.jpg +11965.jpg +28305.jpg +26986.jpg +18783.jpg +8607.jpg +27980.jpg +25412.jpg +1850.jpg +6965.jpg +11103.jpg +11046.jpg +10637.jpg +6275.jpg +534.jpg +28886.jpg +27898.jpg +17848.jpg +18274.jpg +5170.jpg +16681.jpg +2981.jpg +9612.jpg +27827.jpg +1874.jpg +28530.jpg +29185.jpg +1005.jpg +24541.jpg +12222.jpg +2353.jpg +18473.jpg +10903.jpg +27686.jpg +16918.jpg +15678.jpg +28239.jpg +13327.jpg +8917.jpg +13101.jpg +27702.jpg +8283.jpg +12002.jpg +26448.jpg +13076.jpg +25130.jpg +11126.jpg +4298.jpg +21467.jpg +23387.jpg +24093.jpg +23903.jpg +14275.jpg +26583.jpg +20535.jpg +13241.jpg +27141.jpg +16921.jpg +21587.jpg +8553.jpg +17913.jpg +19078.jpg +10685.jpg +5316.jpg +2584.jpg +7553.jpg +806.jpg +27759.jpg +22985.jpg +8175.jpg +6081.jpg +29787.jpg +12262.jpg +15378.jpg +3917.jpg +15713.jpg +25467.jpg +15261.jpg +20241.jpg +15115.jpg +11425.jpg +13756.jpg +5361.jpg +4983.jpg +16521.jpg +10461.jpg +9145.jpg +28320.jpg +3257.jpg +16357.jpg +9422.jpg +31.jpg +2260.jpg +16344.jpg +14515.jpg +17185.jpg +23648.jpg +24661.jpg +11395.jpg +29614.jpg +22938.jpg +24411.jpg +11372.jpg +22540.jpg +4021.jpg +15552.jpg +20368.jpg +20996.jpg +4350.jpg +11150.jpg +29324.jpg +24364.jpg +24844.jpg +5435.jpg +3907.jpg +25188.jpg +11941.jpg +15019.jpg +27349.jpg +28268.jpg +27491.jpg +26431.jpg +16819.jpg +7975.jpg +729.jpg +6468.jpg +20714.jpg +671.jpg +8782.jpg +19369.jpg +11516.jpg +9715.jpg +17154.jpg +6599.jpg +29094.jpg +6828.jpg +16844.jpg +4636.jpg +3079.jpg +10881.jpg +21304.jpg +26225.jpg +22893.jpg +20442.jpg +4797.jpg +12409.jpg +1957.jpg +1271.jpg +22121.jpg +28809.jpg +7155.jpg +11348.jpg +12912.jpg +25281.jpg +9646.jpg +11982.jpg +11667.jpg +20359.jpg +11694.jpg +5792.jpg +25824.jpg +4118.jpg +29788.jpg +20968.jpg +19774.jpg +13884.jpg +14400.jpg +28582.jpg +20517.jpg +12268.jpg +12239.jpg +1059.jpg +19912.jpg +27321.jpg +11847.jpg +1190.jpg +3749.jpg +28173.jpg +12076.jpg +19677.jpg +24510.jpg +14780.jpg +17452.jpg +10870.jpg +10357.jpg +20721.jpg +29487.jpg +22006.jpg +4256.jpg +13874.jpg +10383.jpg +3091.jpg +16631.jpg +10055.jpg +21057.jpg +29046.jpg +25859.jpg +29458.jpg +20999.jpg +12047.jpg +25222.jpg +6509.jpg +27157.jpg +25112.jpg +7476.jpg +20344.jpg +24554.jpg +11777.jpg +1436.jpg +3623.jpg +15917.jpg +16435.jpg +1848.jpg +12082.jpg +8687.jpg +25706.jpg +6220.jpg +8539.jpg +17659.jpg +18318.jpg +851.jpg +8289.jpg +25582.jpg +17883.jpg +22742.jpg +27356.jpg +27721.jpg +25337.jpg +8447.jpg +7349.jpg +20765.jpg +21667.jpg +12694.jpg +5604.jpg +1951.jpg +2833.jpg +15499.jpg +23109.jpg +6596.jpg +18536.jpg +4772.jpg +13129.jpg +11259.jpg +8867.jpg +14737.jpg +21197.jpg +21401.jpg +10828.jpg +8046.jpg +28383.jpg +2325.jpg +3419.jpg +6168.jpg +15779.jpg +535.jpg +875.jpg +3737.jpg +17667.jpg +8141.jpg +10495.jpg +8987.jpg +29663.jpg +9790.jpg +22193.jpg +26660.jpg +6721.jpg +1065.jpg +4075.jpg +15283.jpg 
+28693.jpg +14052.jpg +24326.jpg +14503.jpg +16012.jpg +16580.jpg +11627.jpg +167.jpg +24645.jpg +18369.jpg +7430.jpg +12338.jpg +5785.jpg +18573.jpg +28723.jpg +2795.jpg +12463.jpg +17920.jpg +2280.jpg +27736.jpg +7767.jpg +23167.jpg +29430.jpg +27079.jpg +4871.jpg +17459.jpg +18863.jpg +18382.jpg +18010.jpg +21017.jpg +209.jpg +29674.jpg +6505.jpg +29314.jpg +7245.jpg +4260.jpg +27450.jpg +18634.jpg +20528.jpg +28037.jpg +21053.jpg +15962.jpg +25221.jpg +26817.jpg +1343.jpg +24078.jpg +24977.jpg +7029.jpg +14316.jpg +24334.jpg +23087.jpg +19559.jpg +21099.jpg +21902.jpg +9838.jpg +2288.jpg +28199.jpg +8837.jpg +25576.jpg +27150.jpg +27400.jpg +11994.jpg +29520.jpg +5986.jpg +14909.jpg +3455.jpg +27633.jpg +15419.jpg +2880.jpg +15983.jpg +20599.jpg +12837.jpg +5693.jpg +26988.jpg +15510.jpg +10848.jpg +24178.jpg +6465.jpg +17655.jpg +6327.jpg +2619.jpg +29438.jpg +12372.jpg +2789.jpg +514.jpg +6225.jpg +29824.jpg +6645.jpg +23744.jpg +7796.jpg +27923.jpg +17791.jpg +9521.jpg +26850.jpg +18243.jpg +8236.jpg +17280.jpg +215.jpg +9558.jpg +8756.jpg +1945.jpg +25278.jpg +13367.jpg +18204.jpg +1477.jpg +26676.jpg +21221.jpg +16276.jpg +2540.jpg +20386.jpg +21980.jpg +9230.jpg +1326.jpg +15761.jpg +20264.jpg +27166.jpg +14961.jpg +11856.jpg +22595.jpg +2969.jpg +17717.jpg +13159.jpg +5331.jpg +11626.jpg +20680.jpg +25603.jpg +3081.jpg +9082.jpg +12078.jpg +27973.jpg +25727.jpg +20082.jpg +25048.jpg +27850.jpg +29710.jpg +17296.jpg +12271.jpg +1903.jpg +22824.jpg +7243.jpg +21651.jpg +4074.jpg +5099.jpg +4898.jpg +15125.jpg +6128.jpg +7712.jpg +1665.jpg +20117.jpg +18835.jpg +28548.jpg +8185.jpg +26687.jpg +9370.jpg +14839.jpg +16823.jpg +3105.jpg +24984.jpg +19744.jpg +20821.jpg +26820.jpg +26568.jpg +22718.jpg +10002.jpg +5835.jpg +5135.jpg +13484.jpg +16609.jpg +14508.jpg +634.jpg +12832.jpg +20131.jpg +18989.jpg +1277.jpg +16802.jpg +21370.jpg +19561.jpg +145.jpg +17001.jpg +22798.jpg +17473.jpg +29682.jpg +26562.jpg +15593.jpg +25217.jpg +270.jpg +1474.jpg +8556.jpg +3400.jpg +8712.jpg +22233.jpg +27818.jpg +18713.jpg +8493.jpg +207.jpg +5867.jpg +18411.jpg +17612.jpg +6695.jpg +27228.jpg +27353.jpg +25030.jpg +25912.jpg +1722.jpg +27650.jpg +25992.jpg +29960.jpg +7320.jpg +20291.jpg +16983.jpg +60.jpg +15522.jpg +23669.jpg +15085.jpg +26902.jpg +17911.jpg +2824.jpg +29222.jpg +2314.jpg +23599.jpg +14703.jpg +20797.jpg +18869.jpg +9572.jpg +3446.jpg +7208.jpg +3030.jpg +27497.jpg +4274.jpg +25827.jpg +17798.jpg +14563.jpg +20224.jpg +4989.jpg +28707.jpg +14954.jpg +200.jpg +16613.jpg +22141.jpg +19144.jpg +26086.jpg +24465.jpg +314.jpg +29969.jpg +8822.jpg +8883.jpg +23068.jpg +134.jpg +19325.jpg +4244.jpg +14445.jpg +17402.jpg +8985.jpg +23583.jpg +11592.jpg +6945.jpg +15045.jpg +23239.jpg +4824.jpg +26787.jpg +115.jpg +22315.jpg +10397.jpg +17238.jpg +25242.jpg +18397.jpg +20758.jpg +25343.jpg +10185.jpg +12183.jpg +3244.jpg +14477.jpg +7017.jpg +23641.jpg +18716.jpg +15648.jpg +21365.jpg +19972.jpg +18005.jpg +12633.jpg +14067.jpg +29753.jpg +26564.jpg +8728.jpg +8100.jpg +23264.jpg +19940.jpg +12956.jpg +16134.jpg +24873.jpg +21381.jpg +22637.jpg +14897.jpg +29881.jpg +21505.jpg +1220.jpg +21016.jpg +7508.jpg +3020.jpg +3317.jpg +22169.jpg +9394.jpg +18193.jpg +4626.jpg +19266.jpg +24255.jpg +28289.jpg +19466.jpg +21622.jpg +21281.jpg +12058.jpg +24274.jpg +9913.jpg +27925.jpg +5612.jpg +24171.jpg +6526.jpg +26357.jpg +12292.jpg +9410.jpg +24166.jpg +24544.jpg +14788.jpg +20147.jpg +28359.jpg +28601.jpg +627.jpg +13658.jpg +21094.jpg +12445.jpg +18195.jpg +13582.jpg +10101.jpg +5191.jpg 
+12111.jpg +6825.jpg +23000.jpg +15198.jpg +13954.jpg +17653.jpg +17958.jpg +16874.jpg +4913.jpg +29775.jpg +10604.jpg +18487.jpg +5763.jpg +25409.jpg +22036.jpg +25815.jpg +5499.jpg +13967.jpg +2228.jpg +3595.jpg +17181.jpg +13018.jpg +8726.jpg +11243.jpg +20032.jpg +20706.jpg +23577.jpg +19961.jpg +18030.jpg +25920.jpg +13406.jpg +16001.jpg +18142.jpg +27571.jpg +16699.jpg +11416.jpg +3000.jpg +15618.jpg +23964.jpg +16575.jpg +27527.jpg +268.jpg +23631.jpg +4231.jpg +14777.jpg +26622.jpg +3295.jpg +23938.jpg +23574.jpg +15141.jpg +3315.jpg +9917.jpg +15477.jpg +9295.jpg +5420.jpg +13174.jpg +16245.jpg +17338.jpg +11249.jpg +4517.jpg +23168.jpg +13611.jpg +24304.jpg +11908.jpg +23902.jpg +5482.jpg +12977.jpg +3231.jpg +19035.jpg +17869.jpg +5222.jpg +21919.jpg +22716.jpg +7813.jpg +418.jpg +26813.jpg +9868.jpg +6481.jpg +3051.jpg +26734.jpg +6196.jpg +2710.jpg +27902.jpg +15841.jpg +9439.jpg +14640.jpg +21003.jpg +6087.jpg +22814.jpg +21215.jpg +29372.jpg +23179.jpg +8714.jpg +1459.jpg +23637.jpg +969.jpg +24819.jpg +4656.jpg +10667.jpg +4116.jpg +10176.jpg +13331.jpg +3297.jpg +26064.jpg +7143.jpg +19193.jpg +10021.jpg +4451.jpg +14532.jpg +8530.jpg +19492.jpg +21481.jpg +26826.jpg +4917.jpg +26766.jpg +3156.jpg +19484.jpg +12106.jpg +3519.jpg +9415.jpg +24054.jpg +9349.jpg +5949.jpg +4912.jpg +763.jpg +28114.jpg +19476.jpg +22846.jpg +584.jpg +4130.jpg +3890.jpg +22509.jpg +5306.jpg +27831.jpg +19879.jpg +21695.jpg +1371.jpg +3197.jpg +6615.jpg +24989.jpg +14823.jpg +13396.jpg +26667.jpg +20980.jpg +1824.jpg +17758.jpg +73.jpg +10693.jpg +12791.jpg +25411.jpg +18953.jpg +22109.jpg +6888.jpg +761.jpg +8938.jpg +5263.jpg +15364.jpg +26449.jpg +6137.jpg +26413.jpg +15682.jpg +18839.jpg +15990.jpg +721.jpg +8715.jpg +5529.jpg +408.jpg +21504.jpg +27160.jpg +16013.jpg +27894.jpg +21380.jpg +14362.jpg +17369.jpg +19778.jpg +3126.jpg +27290.jpg +27531.jpg +13419.jpg +2338.jpg +21497.jpg +25829.jpg +16871.jpg +9539.jpg +4182.jpg +4921.jpg +29823.jpg +13207.jpg +15644.jpg +21912.jpg +97.jpg +12231.jpg +9930.jpg +85.jpg +16417.jpg +19807.jpg +6519.jpg +25847.jpg +2830.jpg +14002.jpg +5921.jpg +11472.jpg +3664.jpg +4746.jpg +24682.jpg +10614.jpg +3883.jpg +21647.jpg +27043.jpg +3530.jpg +12653.jpg +2430.jpg +4696.jpg +3465.jpg +8801.jpg +21515.jpg +19328.jpg +7024.jpg +13867.jpg +27509.jpg +19856.jpg +2285.jpg +24714.jpg +19380.jpg +5015.jpg +19454.jpg +1339.jpg +16175.jpg +12367.jpg +5559.jpg +19270.jpg +1669.jpg +16649.jpg +20956.jpg +21552.jpg +11625.jpg +25372.jpg +5671.jpg +4910.jpg +11307.jpg +17240.jpg +16364.jpg +6926.jpg +9563.jpg +2638.jpg +13357.jpg +23786.jpg +132.jpg +22039.jpg +22973.jpg +8159.jpg +27956.jpg +25359.jpg +19824.jpg +12388.jpg +13500.jpg +12424.jpg +15444.jpg +11303.jpg +10765.jpg +17926.jpg +22382.jpg +4253.jpg +4128.jpg +29594.jpg +4131.jpg +25392.jpg +22681.jpg +9480.jpg +13602.jpg +11628.jpg +18655.jpg +3087.jpg +12742.jpg +9738.jpg +1912.jpg +1520.jpg +25540.jpg +1537.jpg +17499.jpg +24472.jpg +26631.jpg +3767.jpg +19916.jpg +5798.jpg +27644.jpg +22034.jpg +16592.jpg +3775.jpg +5197.jpg +3437.jpg +20908.jpg +21702.jpg +4821.jpg +19259.jpg +2393.jpg +6753.jpg +5731.jpg +27307.jpg +21493.jpg +16333.jpg +22731.jpg +7373.jpg +12689.jpg +6144.jpg +24695.jpg +19647.jpg +7283.jpg +8704.jpg +1067.jpg +12448.jpg +19029.jpg +23126.jpg +8184.jpg +10207.jpg +6863.jpg +1708.jpg +28613.jpg +28451.jpg +28384.jpg +7872.jpg +22255.jpg +18987.jpg +5810.jpg +6650.jpg +5395.jpg +26130.jpg +11837.jpg +11180.jpg +2339.jpg +2270.jpg +10294.jpg +15666.jpg +3453.jpg +815.jpg +8148.jpg 
+21615.jpg +5542.jpg +21771.jpg +20073.jpg +6322.jpg +8434.jpg +10116.jpg +25157.jpg +1526.jpg +3506.jpg +121.jpg +14820.jpg +265.jpg +29895.jpg +4972.jpg +13493.jpg +24940.jpg +2588.jpg +24986.jpg +9195.jpg +1058.jpg +5352.jpg +13597.jpg +23881.jpg +6245.jpg +22088.jpg +22050.jpg +26378.jpg +605.jpg +27701.jpg +18432.jpg +29366.jpg +22556.jpg +3484.jpg +8015.jpg +6458.jpg +23493.jpg +23392.jpg +1549.jpg +13056.jpg +8634.jpg +8081.jpg +1969.jpg +11860.jpg +26184.jpg +7901.jpg +7683.jpg +13828.jpg +17022.jpg +17824.jpg +9367.jpg +18263.jpg +5625.jpg +15425.jpg +19357.jpg +2066.jpg +28664.jpg +15062.jpg +20727.jpg +27564.jpg +11115.jpg +6948.jpg +15158.jpg +28313.jpg +12140.jpg +2691.jpg +21572.jpg +22241.jpg +18644.jpg +10845.jpg +1327.jpg +29490.jpg +22269.jpg +11033.jpg +17048.jpg +26185.jpg +17710.jpg +15828.jpg +14399.jpg +26837.jpg +22676.jpg +13460.jpg +21164.jpg +22223.jpg +29069.jpg +8485.jpg +25692.jpg +15463.jpg +181.jpg +24543.jpg +26851.jpg +10976.jpg +6320.jpg +17458.jpg +8259.jpg +17229.jpg +26321.jpg +5870.jpg +6154.jpg +20.jpg +27050.jpg +11052.jpg +17380.jpg +18729.jpg +7076.jpg +14135.jpg +20175.jpg +14750.jpg +29550.jpg +26141.jpg +24975.jpg +8708.jpg +15300.jpg +1349.jpg +23275.jpg +7085.jpg +5737.jpg +22227.jpg +22338.jpg +25073.jpg +2852.jpg +2514.jpg +14826.jpg +27529.jpg +12748.jpg +895.jpg +15403.jpg +13655.jpg +29358.jpg +19927.jpg +10867.jpg +5028.jpg +25600.jpg +3500.jpg +4055.jpg +19265.jpg +12522.jpg +26747.jpg +14792.jpg +18524.jpg +29631.jpg +1241.jpg +4487.jpg +28700.jpg +15246.jpg +20757.jpg +18727.jpg +26477.jpg +18289.jpg +2916.jpg +25632.jpg +19531.jpg +26939.jpg +654.jpg +897.jpg +12872.jpg +19446.jpg +7312.jpg +14046.jpg +26286.jpg +14473.jpg +3607.jpg +5637.jpg +15290.jpg +398.jpg +6381.jpg +486.jpg +13534.jpg +9728.jpg +18463.jpg +3612.jpg +6594.jpg +22719.jpg +24771.jpg +12712.jpg +4014.jpg +15944.jpg +260.jpg +28263.jpg +19377.jpg +5641.jpg +1877.jpg +523.jpg +6026.jpg +28342.jpg +11187.jpg +24655.jpg +18736.jpg +7761.jpg +410.jpg +3382.jpg +1239.jpg +13805.jpg +313.jpg +23348.jpg +27612.jpg +10038.jpg +20141.jpg +5470.jpg +5298.jpg +2988.jpg +4559.jpg +4840.jpg +17699.jpg +261.jpg +12189.jpg +26776.jpg +13454.jpg +29513.jpg +156.jpg +4463.jpg +9815.jpg +4932.jpg +8193.jpg +18544.jpg +25715.jpg +20591.jpg +7268.jpg +6925.jpg +4106.jpg +28853.jpg +14589.jpg +10564.jpg +12142.jpg +1776.jpg +2505.jpg +14244.jpg +23723.jpg +21478.jpg +3693.jpg +9423.jpg +2147.jpg +17255.jpg +2467.jpg +27008.jpg +10049.jpg +29191.jpg +26246.jpg +6018.jpg +6511.jpg +13748.jpg +14813.jpg +15673.jpg +11613.jpg +18751.jpg +6060.jpg +28824.jpg +21836.jpg +23101.jpg +7395.jpg +10917.jpg +10184.jpg +27556.jpg +6775.jpg +11107.jpg +854.jpg +16498.jpg +17231.jpg +15222.jpg +10585.jpg +23727.jpg +27202.jpg +6942.jpg +24764.jpg +21074.jpg +27205.jpg +4886.jpg +26579.jpg +7596.jpg +4617.jpg +28732.jpg +69.jpg +7154.jpg +20564.jpg +22043.jpg +5516.jpg +3113.jpg +24917.jpg +12203.jpg +7469.jpg +15534.jpg +27866.jpg +24234.jpg +26637.jpg +11773.jpg +6630.jpg +18920.jpg +26580.jpg +29160.jpg +3019.jpg +3513.jpg +27856.jpg +25748.jpg +26308.jpg +9672.jpg +8732.jpg +22545.jpg +10655.jpg +2705.jpg +26287.jpg +17837.jpg +6958.jpg +27392.jpg +14636.jpg +15909.jpg +17704.jpg +9528.jpg +27010.jpg +24433.jpg +14528.jpg +12917.jpg +1810.jpg +4285.jpg +11482.jpg +13302.jpg +9581.jpg +10916.jpg +16936.jpg +3072.jpg +21847.jpg +2513.jpg +11713.jpg +21537.jpg +20759.jpg +27762.jpg +1206.jpg +15326.jpg +29496.jpg +29634.jpg +376.jpg +27549.jpg +27476.jpg +6288.jpg +4222.jpg +10938.jpg +8933.jpg 
+10648.jpg +22361.jpg +15873.jpg +237.jpg +16206.jpg +11530.jpg +23195.jpg +9259.jpg +7944.jpg +8388.jpg +13719.jpg +5988.jpg +7996.jpg +26139.jpg +21088.jpg +2853.jpg +21310.jpg +12072.jpg +5502.jpg +11004.jpg +28846.jpg +8971.jpg +19996.jpg +559.jpg +4108.jpg +19460.jpg +26423.jpg +25419.jpg +14643.jpg +17595.jpg +12472.jpg +28084.jpg +25984.jpg +24824.jpg +22429.jpg +22851.jpg +21070.jpg +10899.jpg +3954.jpg +6050.jpg +11419.jpg +15588.jpg +10308.jpg +10413.jpg +5990.jpg +12481.jpg +13634.jpg +14791.jpg +28310.jpg +1332.jpg +21318.jpg +19387.jpg +10527.jpg +16129.jpg +29670.jpg +14031.jpg +13150.jpg +8555.jpg +4225.jpg +28541.jpg +17138.jpg +3310.jpg +18090.jpg +24570.jpg +12521.jpg +25549.jpg +19133.jpg +17216.jpg +12119.jpg +14057.jpg +25340.jpg +14903.jpg +2001.jpg +20427.jpg +6649.jpg +24356.jpg +12882.jpg +26205.jpg +24109.jpg +11550.jpg +20200.jpg +4625.jpg +7488.jpg +6535.jpg +22862.jpg +28820.jpg +25053.jpg +2546.jpg +13804.jpg +19157.jpg +17546.jpg +10911.jpg +29845.jpg +6077.jpg +27766.jpg +7971.jpg +3861.jpg +18893.jpg +9163.jpg +16431.jpg +26925.jpg +26856.jpg +23343.jpg +18937.jpg +21312.jpg +13438.jpg +8492.jpg +3045.jpg +5200.jpg +4083.jpg +9491.jpg +15889.jpg +18059.jpg +18962.jpg +18183.jpg +7827.jpg +3112.jpg +2568.jpg +22892.jpg +29860.jpg +21961.jpg +6951.jpg +28948.jpg +15162.jpg +7714.jpg +189.jpg +27252.jpg +28967.jpg +11196.jpg +1819.jpg +19773.jpg +23286.jpg +14186.jpg +1798.jpg +9523.jpg +4801.jpg +20561.jpg +5489.jpg +23346.jpg +22694.jpg +1177.jpg +28566.jpg +23941.jpg +3100.jpg +15474.jpg +6795.jpg +5506.jpg +11042.jpg +13976.jpg +28066.jpg +20925.jpg +25213.jpg +3005.jpg +5237.jpg +6915.jpg +10946.jpg +12745.jpg +10923.jpg +15906.jpg +29193.jpg +13887.jpg +18707.jpg +8795.jpg +1901.jpg +24503.jpg +17741.jpg +6710.jpg +16713.jpg +21044.jpg +19776.jpg +24092.jpg +9498.jpg +296.jpg +16536.jpg +21701.jpg +17018.jpg +21393.jpg +3422.jpg +10683.jpg +9271.jpg +17567.jpg +24448.jpg +7050.jpg +4120.jpg +23050.jpg +20596.jpg +25301.jpg +27128.jpg +7222.jpg +10504.jpg +17174.jpg +4686.jpg +7160.jpg +1133.jpg +6613.jpg +20742.jpg +13411.jpg +20095.jpg +15501.jpg +25591.jpg +27989.jpg +28283.jpg +3962.jpg +20761.jpg +19199.jpg +4349.jpg +29130.jpg +21480.jpg +26714.jpg +16811.jpg +13312.jpg +16440.jpg +16209.jpg +4756.jpg +11234.jpg +22853.jpg +11937.jpg +25363.jpg +17899.jpg +28646.jpg +14451.jpg +10050.jpg +17333.jpg +2438.jpg +3921.jpg +17320.jpg +7027.jpg +4649.jpg +21343.jpg +5585.jpg +24406.jpg +18657.jpg +1330.jpg +16463.jpg +1437.jpg +26788.jpg +18964.jpg +26145.jpg +22991.jpg +28031.jpg +24115.jpg +3256.jpg +18823.jpg +28895.jpg +12385.jpg +4485.jpg +20860.jpg +14456.jpg +14644.jpg +9464.jpg +7335.jpg +15637.jpg +7903.jpg +21042.jpg +29591.jpg +29299.jpg +21352.jpg +4419.jpg +419.jpg +5593.jpg +2178.jpg +3935.jpg +18402.jpg +11155.jpg +20787.jpg +29516.jpg +4003.jpg +1484.jpg +23200.jpg +28590.jpg +9419.jpg +22310.jpg +23025.jpg +15704.jpg +28784.jpg +26234.jpg +17440.jpg +18425.jpg +12695.jpg +676.jpg +8178.jpg +9101.jpg +17102.jpg +17849.jpg +18326.jpg +6157.jpg +24513.jpg +12211.jpg +23158.jpg +18526.jpg +8773.jpg +18903.jpg +22664.jpg +9313.jpg +24215.jpg +12542.jpg +28030.jpg +11981.jpg +29813.jpg +29413.jpg +19474.jpg +13010.jpg +13600.jpg +15214.jpg +20515.jpg +8128.jpg +8425.jpg +9911.jpg +980.jpg +24152.jpg +11604.jpg +3502.jpg +11218.jpg +20252.jpg +24168.jpg +20975.jpg +23397.jpg +28697.jpg +1021.jpg +16523.jpg +20347.jpg +12929.jpg +27659.jpg +9577.jpg +13335.jpg +12958.jpg +19733.jpg +22207.jpg +15036.jpg +14684.jpg +21165.jpg +22971.jpg +20590.jpg 
+24431.jpg +22330.jpg +4982.jpg +25872.jpg +20334.jpg +18306.jpg +12646.jpg +18185.jpg +26004.jpg +5265.jpg +776.jpg +16993.jpg +23524.jpg +19017.jpg +1928.jpg +19805.jpg +4528.jpg +28358.jpg +23659.jpg +5096.jpg +26963.jpg +6041.jpg +5861.jpg +19327.jpg +2630.jpg +4363.jpg +3851.jpg +10783.jpg +26594.jpg +16742.jpg +7837.jpg +25133.jpg +16225.jpg +5598.jpg +10157.jpg +20475.jpg +12965.jpg +1779.jpg +13866.jpg +21813.jpg +2361.jpg +26759.jpg +6550.jpg +6904.jpg +8484.jpg +12616.jpg +22519.jpg +23358.jpg +2807.jpg +17472.jpg +3456.jpg +16808.jpg +8847.jpg +22821.jpg +4353.jpg +8891.jpg +8549.jpg +6800.jpg +20550.jpg +11553.jpg +6515.jpg +11732.jpg +20772.jpg +4592.jpg +5238.jpg +7855.jpg +15211.jpg +11894.jpg +19552.jpg +6593.jpg +2163.jpg +29023.jpg +29827.jpg +1747.jpg +13977.jpg +1432.jpg +8001.jpg +13668.jpg +29082.jpg +8389.jpg +26375.jpg +5774.jpg +14786.jpg +5925.jpg +8752.jpg +18241.jpg +17426.jpg +449.jpg +28419.jpg +15155.jpg +7914.jpg +10754.jpg +23277.jpg +10271.jpg +10284.jpg +15550.jpg +2120.jpg +7733.jpg +7560.jpg +20183.jpg +5795.jpg +28815.jpg +5740.jpg +15775.jpg +6454.jpg +26960.jpg +22355.jpg +24102.jpg +10250.jpg +16374.jpg +22525.jpg +19067.jpg +16.jpg +19731.jpg +26126.jpg +23440.jpg +29355.jpg +28347.jpg +4687.jpg +27608.jpg +11564.jpg +5587.jpg +1674.jpg +7006.jpg +18368.jpg +25452.jpg +28714.jpg +18300.jpg +24266.jpg +23427.jpg +28171.jpg +14985.jpg +1828.jpg +27257.jpg +1373.jpg +12480.jpg +3016.jpg +16099.jpg +6796.jpg +5512.jpg +19963.jpg +8172.jpg +22649.jpg +6555.jpg +25564.jpg +2166.jpg +9401.jpg +1676.jpg +18973.jpg +17581.jpg +21414.jpg +16041.jpg +18027.jpg +14900.jpg +1883.jpg +658.jpg +14454.jpg +8798.jpg +8511.jpg +26872.jpg +8291.jpg +5570.jpg +29263.jpg +26836.jpg +6597.jpg +1426.jpg +11757.jpg +15293.jpg +9933.jpg +29131.jpg +24268.jpg +3658.jpg +6346.jpg +5350.jpg +18261.jpg +25804.jpg +14989.jpg +7865.jpg +4703.jpg +28898.jpg +76.jpg +3076.jpg +23673.jpg +26385.jpg +27685.jpg +28526.jpg +25155.jpg +10747.jpg +20763.jpg +884.jpg +28775.jpg +13479.jpg +27467.jpg +8631.jpg +3585.jpg +28638.jpg +10280.jpg +5527.jpg +20037.jpg +13768.jpg +18096.jpg +23603.jpg +28725.jpg +17050.jpg +7736.jpg +17797.jpg +22108.jpg +17100.jpg +3591.jpg +16798.jpg +27637.jpg +25735.jpg +1843.jpg +3956.jpg +27116.jpg +17313.jpg +17917.jpg +18140.jpg +1098.jpg +11435.jpg +950.jpg +29500.jpg +14933.jpg +26197.jpg +8472.jpg +730.jpg +9153.jpg +14958.jpg +17059.jpg +7742.jpg +7482.jpg +17454.jpg +27355.jpg +1658.jpg +18436.jpg +21534.jpg +14696.jpg +15651.jpg +25854.jpg +26842.jpg +22119.jpg +26834.jpg +16375.jpg +22370.jpg +19255.jpg +2238.jpg +5078.jpg +20136.jpg +979.jpg +11466.jpg +20204.jpg +24685.jpg +22535.jpg +747.jpg +4236.jpg +22732.jpg +19343.jpg +6350.jpg +20035.jpg +10541.jpg +19518.jpg +22342.jpg +1958.jpg +16908.jpg +3537.jpg +5688.jpg +21550.jpg +29277.jpg +10375.jpg +14498.jpg +13430.jpg +6150.jpg +20440.jpg +10483.jpg +25162.jpg +23948.jpg +25423.jpg +21306.jpg +22277.jpg +22392.jpg +19862.jpg +22384.jpg +21311.jpg +17196.jpg +12447.jpg +25144.jpg +29026.jpg +27440.jpg +28118.jpg +12839.jpg +5782.jpg +24062.jpg +19632.jpg +14069.jpg +12452.jpg +6755.jpg +8699.jpg +19482.jpg +21907.jpg +16467.jpg +10401.jpg +12468.jpg +15337.jpg +19526.jpg +5155.jpg +10391.jpg +20193.jpg +25397.jpg +125.jpg +137.jpg +23204.jpg +18757.jpg +9835.jpg +19508.jpg +5409.jpg +9083.jpg +6779.jpg +9359.jpg +20952.jpg +3866.jpg +22155.jpg +24390.jpg +18187.jpg +20546.jpg +116.jpg +4126.jpg +10807.jpg +7282.jpg +18264.jpg +2075.jpg +11506.jpg +18052.jpg +4240.jpg +10716.jpg +11143.jpg 
+3711.jpg +26318.jpg +212.jpg +27570.jpg +19277.jpg +5079.jpg +29471.jpg +8120.jpg +16277.jpg +14564.jpg +8358.jpg +26381.jpg +18220.jpg +24497.jpg +16569.jpg +7446.jpg +14073.jpg +2068.jpg +6290.jpg +23396.jpg +4666.jpg +16258.jpg +13927.jpg +14527.jpg +4862.jpg +10366.jpg +26646.jpg +28741.jpg +1719.jpg +2116.jpg +6302.jpg +13815.jpg +4668.jpg +6261.jpg +8799.jpg +16747.jpg +14125.jpg +16828.jpg +3215.jpg +53.jpg +28468.jpg +1243.jpg +13888.jpg +23097.jpg +742.jpg +4615.jpg +23514.jpg +26303.jpg +26382.jpg +17453.jpg +7873.jpg +28110.jpg +10536.jpg +21206.jpg +13453.jpg +27848.jpg +10970.jpg +2842.jpg +4110.jpg +5102.jpg +7285.jpg +20661.jpg +28989.jpg +2614.jpg +28029.jpg +24097.jpg +12722.jpg +19840.jpg +20749.jpg +17392.jpg +26722.jpg +11502.jpg +20751.jpg +5378.jpg +29403.jpg +409.jpg +28962.jpg +24230.jpg +17843.jpg +4594.jpg +956.jpg +9261.jpg +23694.jpg +18384.jpg +25057.jpg +23342.jpg +1770.jpg +9711.jpg +2061.jpg +21832.jpg +15278.jpg +18797.jpg +24468.jpg +23029.jpg +8821.jpg +15237.jpg +28576.jpg +14721.jpg +26536.jpg +2713.jpg +14063.jpg +5053.jpg +17856.jpg +16287.jpg +13716.jpg +18738.jpg +17714.jpg +28745.jpg +5939.jpg +10081.jpg +2192.jpg +18637.jpg +19237.jpg +7235.jpg +27138.jpg +3532.jpg +3307.jpg +18075.jpg +19910.jpg +5733.jpg +23457.jpg +8126.jpg +17678.jpg +22489.jpg +21648.jpg +19715.jpg +3913.jpg +3332.jpg +25184.jpg +8923.jpg +27405.jpg +24967.jpg +1020.jpg +21571.jpg +9969.jpg +25866.jpg +500.jpg +25435.jpg +9803.jpg +29041.jpg +26496.jpg +22432.jpg +21471.jpg +21045.jpg +7803.jpg +28209.jpg +25651.jpg +24013.jpg +373.jpg +7374.jpg +6129.jpg +3271.jpg +20658.jpg +19517.jpg +21607.jpg +29509.jpg +7911.jpg +13113.jpg +22831.jpg +13409.jpg +26503.jpg +20470.jpg +18403.jpg +4779.jpg +3497.jpg +14867.jpg +27541.jpg +24149.jpg +9009.jpg +20819.jpg +16052.jpg +6699.jpg +10515.jpg +2704.jpg +2583.jpg +6378.jpg +20542.jpg +10114.jpg +6435.jpg +18889.jpg +19944.jpg +18722.jpg +213.jpg +28680.jpg +26964.jpg +4448.jpg +25686.jpg +3228.jpg +7866.jpg +6344.jpg +1655.jpg +19606.jpg +14767.jpg +18219.jpg +7856.jpg +20799.jpg +25071.jpg +18160.jpg +13400.jpg +4610.jpg +17746.jpg +11197.jpg +18398.jpg +2005.jpg +21896.jpg +17389.jpg +19051.jpg +7743.jpg +19653.jpg +23033.jpg +3447.jpg +19621.jpg +26227.jpg +22439.jpg +24853.jpg +11907.jpg +12574.jpg +7780.jpg +5885.jpg +17019.jpg +21284.jpg +19890.jpg +10679.jpg +18235.jpg +9731.jpg +11939.jpg +13944.jpg +1502.jpg +21161.jpg +22561.jpg +28056.jpg +10481.jpg +23298.jpg +3665.jpg +11161.jpg +11957.jpg +15294.jpg +12495.jpg +12950.jpg +24241.jpg +11352.jpg +19587.jpg +22680.jpg +3189.jpg +7225.jpg +4407.jpg +29623.jpg +25254.jpg +15373.jpg +23930.jpg +19012.jpg +1622.jpg +29737.jpg +12935.jpg +15098.jpg +21720.jpg +18667.jpg +5034.jpg +4204.jpg +18954.jpg +7904.jpg +5722.jpg +12787.jpg +6713.jpg +3417.jpg +20465.jpg +14047.jpg +5523.jpg +19073.jpg +1117.jpg +10894.jpg +17332.jpg +15484.jpg +20323.jpg +18291.jpg +370.jpg +6260.jpg +15908.jpg +5898.jpg +19903.jpg +14866.jpg +17985.jpg +338.jpg +7392.jpg +25072.jpg +786.jpg +22298.jpg +2284.jpg +8284.jpg +23211.jpg +9818.jpg +3093.jpg +9567.jpg +21079.jpg +11438.jpg +10096.jpg +26725.jpg +13314.jpg +29910.jpg +21078.jpg +20645.jpg +28814.jpg +817.jpg +617.jpg +8577.jpg +28760.jpg +27033.jpg +1422.jpg +5140.jpg +29333.jpg +301.jpg +4909.jpg +25803.jpg +19612.jpg +14375.jpg +18310.jpg +14177.jpg +8875.jpg +10789.jpg +20034.jpg +15608.jpg +26697.jpg +18228.jpg +4859.jpg +15016.jpg +24359.jpg +6635.jpg +25097.jpg +3846.jpg +24023.jpg +8155.jpg +17039.jpg +28184.jpg +26596.jpg +1183.jpg 
+1196.jpg +28090.jpg +1143.jpg +12657.jpg +8420.jpg +27313.jpg +29409.jpg +20237.jpg +2188.jpg +12794.jpg +11975.jpg +4506.jpg +1588.jpg +26558.jpg +8757.jpg +28656.jpg +17254.jpg +13904.jpg +4573.jpg +19948.jpg +14241.jpg +24367.jpg +9186.jpg +17980.jpg +18267.jpg +1056.jpg +28993.jpg +22943.jpg +19208.jpg +28757.jpg +7678.jpg +12817.jpg +6397.jpg +24482.jpg +14618.jpg +22145.jpg +23484.jpg +14738.jpg +24464.jpg +26995.jpg +9012.jpg +14993.jpg +3352.jpg +14712.jpg +11650.jpg +10524.jpg +19224.jpg +14368.jpg +25169.jpg +10726.jpg +7984.jpg +20024.jpg +16657.jpg +16495.jpg +17738.jpg +29381.jpg +9418.jpg +15986.jpg +4733.jpg +5245.jpg +3616.jpg +12444.jpg +7810.jpg +3871.jpg +28424.jpg +7759.jpg +20904.jpg +2021.jpg +6783.jpg +2426.jpg +5640.jpg +9957.jpg +27550.jpg +2735.jpg +4284.jpg +22834.jpg +8520.jpg +12288.jpg +23435.jpg +17576.jpg +5373.jpg +17560.jpg +16570.jpg +27484.jpg +14634.jpg +5712.jpg +1022.jpg +15248.jpg +12199.jpg +4053.jpg +18817.jpg +15142.jpg +2170.jpg +17132.jpg +271.jpg +26887.jpg +17466.jpg +23897.jpg +14578.jpg +14927.jpg +19350.jpg +15517.jpg +14081.jpg +19138.jpg +12874.jpg +3162.jpg +2445.jpg +22123.jpg +24525.jpg +8272.jpg +26649.jpg +25980.jpg +17550.jpg +2658.jpg +28504.jpg +5929.jpg +11351.jpg +26485.jpg +18108.jpg +27948.jpg +2232.jpg +5561.jpg +12236.jpg +29833.jpg +2388.jpg +9005.jpg +11397.jpg +9364.jpg +18423.jpg +9388.jpg +10337.jpg +2040.jpg +7354.jpg +20322.jpg +8833.jpg +24099.jpg +29203.jpg +437.jpg +18111.jpg +12967.jpg +9414.jpg +362.jpg +19323.jpg +26341.jpg +2156.jpg +17530.jpg +21122.jpg +26453.jpg +11319.jpg +24173.jpg +4923.jpg +21146.jpg +17490.jpg +4087.jpg +20194.jpg +10470.jpg +26412.jpg +28229.jpg +25285.jpg +466.jpg +62.jpg +7516.jpg +16898.jpg +29489.jpg +5374.jpg +7465.jpg +13790.jpg +28143.jpg +22285.jpg +29245.jpg +1559.jpg +2409.jpg +4458.jpg +24658.jpg +20402.jpg +21867.jpg +26640.jpg +24087.jpg +10306.jpg +15287.jpg +19652.jpg +11832.jpg +5136.jpg +15448.jpg +24063.jpg +2955.jpg +13385.jpg +10661.jpg +9018.jpg +15965.jpg +16664.jpg +14871.jpg +4109.jpg +753.jpg +2108.jpg +25175.jpg +5651.jpg +5639.jpg +29648.jpg +5045.jpg +6024.jpg +8589.jpg +29569.jpg +12931.jpg +16004.jpg +24767.jpg +12587.jpg +2095.jpg +10230.jpg +149.jpg +14499.jpg +2586.jpg +527.jpg +2471.jpg +16271.jpg +29325.jpg +21072.jpg +13594.jpg +4121.jpg +29364.jpg +539.jpg +13614.jpg +22077.jpg +19319.jpg +19273.jpg +13296.jpg +28957.jpg +8227.jpg +12389.jpg +6770.jpg +16079.jpg +6765.jpg +19241.jpg +24156.jpg +27703.jpg +6310.jpg +29393.jpg +23760.jpg +217.jpg +18494.jpg +16500.jpg +23824.jpg +1033.jpg +18281.jpg +19843.jpg +23809.jpg +2618.jpg +17375.jpg +2074.jpg +11728.jpg +28448.jpg +8877.jpg +8655.jpg +16501.jpg +8058.jpg +13185.jpg +20396.jpg +18850.jpg +24807.jpg +8934.jpg +7082.jpg +17737.jpg +17095.jpg +12033.jpg +12841.jpg +25586.jpg +15995.jpg +2103.jpg +4810.jpg +2725.jpg +4188.jpg +7286.jpg +2963.jpg +26103.jpg +22299.jpg +2665.jpg +4819.jpg +22913.jpg +22124.jpg +11629.jpg +3695.jpg +15038.jpg +28369.jpg +23341.jpg +20122.jpg +14874.jpg +14743.jpg +26199.jpg +14621.jpg +17784.jpg +10963.jpg +24295.jpg +11517.jpg +10073.jpg +14338.jpg +22262.jpg +27631.jpg +25136.jpg +16961.jpg +3366.jpg +26142.jpg +19447.jpg +24310.jpg +8470.jpg +3412.jpg +19827.jpg +25360.jpg +10509.jpg +2796.jpg +11427.jpg +18041.jpg +16392.jpg +8763.jpg +27970.jpg +26991.jpg +21974.jpg +28632.jpg +22105.jpg +26821.jpg +13669.jpg +24240.jpg +24506.jpg +20854.jpg +26442.jpg +25116.jpg +12488.jpg +13540.jpg +27381.jpg +22448.jpg +2956.jpg +13970.jpg +12347.jpg +16068.jpg +11540.jpg 
+1135.jpg +22547.jpg +26814.jpg +6376.jpg +21637.jpg +22032.jpg +11026.jpg +5984.jpg +14117.jpg +4818.jpg +22994.jpg +10424.jpg +28352.jpg +29087.jpg +4164.jpg +23409.jpg +19479.jpg +12474.jpg +7795.jpg +16646.jpg +23236.jpg +20704.jpg +8190.jpg +28398.jpg +526.jpg +19231.jpg +4348.jpg +12954.jpg +5142.jpg +4134.jpg +4217.jpg +4514.jpg +25507.jpg +16444.jpg +10158.jpg +21500.jpg +3033.jpg +6167.jpg +6108.jpg +19791.jpg +22258.jpg +8669.jpg +1057.jpg +2911.jpg +25607.jpg +14924.jpg +27114.jpg +4198.jpg +12153.jpg +7000.jpg +24671.jpg +4216.jpg +24708.jpg +17526.jpg +12741.jpg +7526.jpg +24993.jpg +21201.jpg +10785.jpg +21735.jpg +8559.jpg +1037.jpg +14089.jpg +9485.jpg +14896.jpg +22682.jpg +685.jpg +12088.jpg +4393.jpg +21890.jpg +25279.jpg +26340.jpg +4041.jpg +26374.jpg +10189.jpg +2864.jpg +29699.jpg +18642.jpg +17631.jpg +4343.jpg +16765.jpg +12682.jpg +7514.jpg +28432.jpg +21033.jpg +18563.jpg +4402.jpg +5309.jpg +8415.jpg +2112.jpg +4882.jpg +12206.jpg +27393.jpg +18630.jpg +1104.jpg +8664.jpg +23420.jpg +4778.jpg +6307.jpg +8424.jpg +6316.jpg +444.jpg +2094.jpg +20824.jpg +26323.jpg +25262.jpg +25689.jpg +17993.jpg +18539.jpg +14396.jpg +456.jpg +24251.jpg +25389.jpg +24161.jpg +14347.jpg +21198.jpg +15426.jpg +16991.jpg +12579.jpg +7203.jpg +24259.jpg +14536.jpg +28520.jpg +21873.jpg +6100.jpg +7541.jpg +11935.jpg +26510.jpg +7099.jpg +9555.jpg +19461.jpg +21753.jpg +3485.jpg +24983.jpg +20080.jpg +28393.jpg +27135.jpg +280.jpg +6905.jpg +310.jpg +14291.jpg +1952.jpg +2289.jpg +27288.jpg +26094.jpg +6551.jpg +27379.jpg +27291.jpg +9862.jpg +26410.jpg +11403.jpg +4637.jpg +25849.jpg +19217.jpg +17300.jpg +16407.jpg +4570.jpg +22112.jpg +17696.jpg +2921.jpg +6849.jpg +7112.jpg +19813.jpg +18333.jpg +23987.jpg +2930.jpg +18565.jpg +10933.jpg +18902.jpg +2385.jpg +14008.jpg +20472.jpg +13368.jpg +5901.jpg +17671.jpg +26756.jpg +21384.jpg +19438.jpg +28715.jpg +19757.jpg +19676.jpg +25966.jpg +19437.jpg +4136.jpg +15111.jpg +2564.jpg +25023.jpg +5720.jpg +2031.jpg +4853.jpg +5333.jpg +27511.jpg +2362.jpg +9033.jpg +17020.jpg +9658.jpg +25146.jpg +28918.jpg +5328.jpg +29231.jpg +2865.jpg +22468.jpg +15703.jpg +7126.jpg +14233.jpg +1006.jpg +18303.jpg +25923.jpg +4328.jpg +16179.jpg +28521.jpg +27281.jpg +27388.jpg +17665.jpg +16215.jpg +19202.jpg +5437.jpg +8264.jpg +28926.jpg +27054.jpg +15622.jpg +23247.jpg +7423.jpg +21900.jpg +11177.jpg +2082.jpg +21903.jpg +11121.jpg +16796.jpg +12776.jpg +21679.jpg +8005.jpg +20708.jpg +19090.jpg +7878.jpg +9157.jpg +26775.jpg +7920.jpg +8568.jpg +15786.jpg +23582.jpg +29510.jpg +29973.jpg +6981.jpg +5741.jpg +21770.jpg +12181.jpg +24107.jpg +27066.jpg +23249.jpg +5726.jpg +8662.jpg +22600.jpg +8265.jpg +18801.jpg +5940.jpg +19117.jpg +2491.jpg +28800.jpg +1041.jpg +11364.jpg +21176.jpg +23441.jpg +12545.jpg +21963.jpg +22387.jpg +21181.jpg +9636.jpg +8412.jpg +17225.jpg +1414.jpg +29855.jpg +13677.jpg +11005.jpg +22461.jpg +650.jpg +15997.jpg +12583.jpg +14972.jpg +14833.jpg +29603.jpg +16083.jpg +17941.jpg +22734.jpg +6938.jpg +2794.jpg +3708.jpg +6829.jpg +5923.jpg +13359.jpg +16509.jpg +17742.jpg +7292.jpg +11806.jpg +13200.jpg +28737.jpg +8701.jpg +15193.jpg +13975.jpg +29374.jpg +27411.jpg +21937.jpg +18512.jpg +336.jpg +18717.jpg +26137.jpg +24401.jpg +20620.jpg +16764.jpg +3023.jpg +19737.jpg +23335.jpg +29139.jpg +22830.jpg +5744.jpg +3291.jpg +8541.jpg +2416.jpg +19053.jpg +1310.jpg +12785.jpg +13834.jpg +23053.jpg +21452.jpg +15513.jpg +4505.jpg +24614.jpg +17076.jpg +15492.jpg +18758.jpg +14170.jpg +12535.jpg +26414.jpg +24190.jpg 
+3243.jpg +16029.jpg +25524.jpg +25843.jpg +25108.jpg +13094.jpg +3363.jpg +21675.jpg +13329.jpg +19801.jpg +1480.jpg +8839.jpg +11383.jpg +11345.jpg +6595.jpg +13845.jpg +20197.jpg +8188.jpg +12428.jpg +17029.jpg +9879.jpg +24811.jpg +13117.jpg +2557.jpg +4191.jpg +29768.jpg +17995.jpg +9674.jpg +13937.jpg +8912.jpg +2821.jpg +4579.jpg +29334.jpg +6886.jpg +715.jpg +27790.jpg +25080.jpg +1347.jpg +19103.jpg +16513.jpg +29353.jpg +12855.jpg +20951.jpg +20669.jpg +3750.jpg +4835.jpg +25970.jpg +949.jpg +24779.jpg +6337.jpg +20728.jpg +4501.jpg +15923.jpg +1529.jpg +7558.jpg +8776.jpg +680.jpg +15645.jpg +28499.jpg +28524.jpg +26666.jpg +1419.jpg +5624.jpg +15409.jpg +24073.jpg +10787.jpg +10326.jpg +28743.jpg +4341.jpg +25388.jpg +7426.jpg +26217.jpg +14657.jpg +10587.jpg +18593.jpg +21194.jpg +22931.jpg +11064.jpg +10878.jpg +6592.jpg +20959.jpg +11709.jpg +17435.jpg +11610.jpg +13395.jpg +10554.jpg +9148.jpg +24033.jpg +9762.jpg +16294.jpg +27434.jpg +11400.jpg +3173.jpg +1704.jpg +26764.jpg +22493.jpg +13468.jpg +5706.jpg +23944.jpg +16044.jpg +13126.jpg +18394.jpg +2207.jpg +20504.jpg +20870.jpg +15177.jpg +11023.jpg +10063.jpg +19130.jpg +2259.jpg +1743.jpg +1281.jpg +11311.jpg +7609.jpg +20898.jpg +23100.jpg +27933.jpg +10914.jpg +13680.jpg +1726.jpg +21553.jpg +13424.jpg +9843.jpg +17486.jpg +26636.jpg +19450.jpg +16100.jpg +17094.jpg +25170.jpg +6396.jpg +9944.jpg +4960.jpg +26873.jpg +13450.jpg +17068.jpg +5337.jpg +6842.jpg +1662.jpg +8710.jpg +13136.jpg +20328.jpg +27176.jpg +18453.jpg +780.jpg +23628.jpg +7916.jpg +6259.jpg +8156.jpg +24146.jpg +9245.jpg +9544.jpg +23020.jpg +22139.jpg +18981.jpg +26709.jpg +17031.jpg +4267.jpg +29063.jpg +8536.jpg +17336.jpg +26475.jpg +1521.jpg +21413.jpg +29568.jpg +24417.jpg +13346.jpg +23357.jpg +10979.jpg +11735.jpg +11917.jpg +17929.jpg +9520.jpg +309.jpg +8593.jpg +9290.jpg +18674.jpg +13254.jpg +5834.jpg +581.jpg +22346.jpg +20274.jpg +4058.jpg +12751.jpg +13576.jpg +10608.jpg +7819.jpg +10858.jpg +29043.jpg +1652.jpg +7868.jpg +21133.jpg +4731.jpg +29679.jpg +25552.jpg +10563.jpg +23729.jpg +25250.jpg +1962.jpg +17359.jpg +22809.jpg +19806.jpg +3127.jpg +14485.jpg +23252.jpg +666.jpg +8299.jpg +27068.jpg +1073.jpg +220.jpg +19651.jpg +26981.jpg +29680.jpg +1048.jpg +79.jpg +24660.jpg +2650.jpg +14674.jpg +11749.jpg +25297.jpg +9688.jpg +16097.jpg +6036.jpg +15797.jpg +19348.jpg +9786.jpg +29816.jpg +12552.jpg +28455.jpg +15870.jpg +15347.jpg +23676.jpg +27343.jpg +20801.jpg +20273.jpg +21018.jpg +2695.jpg +4066.jpg +2890.jpg +7062.jpg +4973.jpg +23957.jpg +4803.jpg +27295.jpg +29739.jpg +10146.jpg +3988.jpg +22580.jpg +12551.jpg +22551.jpg +10281.jpg +3632.jpg +25015.jpg +15071.jpg +16881.jpg +19178.jpg +27154.jpg +25024.jpg +28164.jpg +16902.jpg +12440.jpg +21728.jpg +26074.jpg +9049.jpg +26875.jpg +18883.jpg +24756.jpg +19562.jpg +14979.jpg +9998.jpg +8897.jpg +12011.jpg +18676.jpg +15881.jpg +25665.jpg +26506.jpg +20045.jpg +13421.jpg +12868.jpg +28145.jpg +20697.jpg +5089.jpg +18238.jpg +14981.jpg +22945.jpg +1458.jpg +24647.jpg +29954.jpg +23496.jpg +2912.jpg +27029.jpg +25479.jpg +27305.jpg +21071.jpg +21843.jpg +19887.jpg +19063.jpg +26115.jpg +28786.jpg +22891.jpg +23689.jpg +6909.jpg +25621.jpg +11818.jpg +21420.jpg +24254.jpg +18449.jpg +4851.jpg +6808.jpg +10728.jpg +26882.jpg +27168.jpg +3679.jpg +23402.jpg +16552.jpg +16787.jpg +14724.jpg +8124.jpg +2398.jpg +4410.jpg +29254.jpg +16530.jpg +12862.jpg +8365.jpg +9210.jpg +11630.jpg +4957.jpg +17865.jpg +22527.jpg +4093.jpg +18578.jpg +3873.jpg +25647.jpg +10319.jpg 
+3018.jpg +6865.jpg +18975.jpg +8161.jpg +8145.jpg +15571.jpg +22323.jpg +5985.jpg +18510.jpg +23354.jpg +18206.jpg +25326.jpg +23233.jpg +19008.jpg +9430.jpg +405.jpg +19497.jpg +19702.jpg +11200.jpg +15090.jpg +2155.jpg +19987.jpg +5313.jpg +11617.jpg +15639.jpg +10450.jpg +14868.jpg +15485.jpg +13514.jpg +14018.jpg +23817.jpg +17484.jpg +18914.jpg +10229.jpg +12941.jpg +8499.jpg +22777.jpg +20715.jpg +5513.jpg +21449.jpg +4011.jpg +2444.jpg +7300.jpg +20072.jpg +23910.jpg +23737.jpg +15702.jpg +9379.jpg +21722.jpg +22542.jpg +10891.jpg +19183.jpg +22288.jpg +27249.jpg +23507.jpg +8169.jpg +20833.jpg +22081.jpg +18638.jpg +6332.jpg +28053.jpg +25114.jpg +18262.jpg +24520.jpg +7730.jpg +27732.jpg +4908.jpg +10987.jpg +16965.jpg +9755.jpg +6386.jpg +14890.jpg +5036.jpg +1122.jpg +29202.jpg +17468.jpg +26706.jpg +22019.jpg +3329.jpg +19184.jpg +5381.jpg +20965.jpg +820.jpg +21913.jpg +24532.jpg +25331.jpg +1176.jpg +19643.jpg +26833.jpg +5344.jpg +23464.jpg +10499.jpg +25585.jpg +17480.jpg +24170.jpg +26443.jpg +8108.jpg +16933.jpg +1029.jpg +14108.jpg +20720.jpg +15693.jpg +7066.jpg +23593.jpg +4390.jpg +3514.jpg +16757.jpg +16591.jpg +23481.jpg +15188.jpg +5137.jpg +13506.jpg +16447.jpg +15580.jpg +13589.jpg +18003.jpg +27841.jpg +6429.jpg +5457.jpg +7389.jpg +2897.jpg +18286.jpg +426.jpg +9339.jpg +22981.jpg +28122.jpg +10239.jpg +6356.jpg +2684.jpg +5917.jpg +18855.jpg +10750.jpg +3193.jpg +27755.jpg +6215.jpg +9909.jpg +13964.jpg +8991.jpg +29417.jpg +28562.jpg +27182.jpg +23404.jpg +20671.jpg +16658.jpg +5243.jpg +18431.jpg +27589.jpg +10962.jpg +7295.jpg +3028.jpg +9075.jpg +19945.jpg +1276.jpg +550.jpg +15656.jpg +2615.jpg +13793.jpg +24476.jpg +6402.jpg +15028.jpg +25506.jpg +25391.jpg +7889.jpg +27457.jpg +1369.jpg +27084.jpg +27998.jpg +23028.jpg +25768.jpg +15825.jpg +19704.jpg +1391.jpg +27148.jpg +3395.jpg +9597.jpg +17904.jpg +7079.jpg +7439.jpg +10700.jpg +144.jpg +10173.jpg +5428.jpg +6336.jpg +19061.jpg +3377.jpg +12732.jpg +2435.jpg +29846.jpg +3986.jpg +27058.jpg +7196.jpg +17602.jpg +7190.jpg +20265.jpg +5426.jpg +4852.jpg +24677.jpg +24185.jpg +13882.jpg +9793.jpg +4061.jpg +16978.jpg +9536.jpg +27997.jpg +10384.jpg +17767.jpg +26241.jpg +3054.jpg +4064.jpg +16457.jpg +18966.jpg +8691.jpg +7575.jpg +3180.jpg +18759.jpg +8592.jpg +17779.jpg +21761.jpg +29711.jpg +1534.jpg +821.jpg +26256.jpg +12707.jpg +1701.jpg +9093.jpg +247.jpg +1218.jpg +18444.jpg +14746.jpg +12382.jpg +25385.jpg +25756.jpg +11149.jpg +17633.jpg +14977.jpg +22989.jpg +20288.jpg +23709.jpg +15979.jpg +17362.jpg +11408.jpg +27508.jpg +5674.jpg +5847.jpg +8433.jpg +3323.jpg +22650.jpg +9490.jpg +11404.jpg +10502.jpg +18077.jpg +21051.jpg +23818.jpg +14032.jpg +1803.jpg +16247.jpg +2829.jpg +11839.jpg +14106.jpg +7039.jpg +40.jpg +1744.jpg +20059.jpg +7951.jpg +8460.jpg +2397.jpg +15147.jpg +7183.jpg +21851.jpg +17832.jpg +3405.jpg +28813.jpg +6125.jpg +11412.jpg +5201.jpg +29093.jpg +29282.jpg +13637.jpg +17282.jpg +2464.jpg +14131.jpg +2480.jpg +2525.jpg +28286.jpg +4018.jpg +20769.jpg +24172.jpg +17887.jpg +15059.jpg +11534.jpg +1002.jpg +9932.jpg +12442.jpg +10061.jpg +11869.jpg +27991.jpg +19724.jpg +1364.jpg +29150.jpg +22860.jpg +14354.jpg +9039.jpg +25539.jpg +5535.jpg +17227.jpg +16621.jpg +794.jpg +24183.jpg +2595.jpg +5822.jpg +19042.jpg +15336.jpg +28742.jpg +8540.jpg +22759.jpg +21544.jpg +26570.jpg +28727.jpg +6478.jpg +28519.jpg +28250.jpg +29387.jpg +11269.jpg +4033.jpg +5909.jpg +16832.jpg +26532.jpg +15798.jpg +10290.jpg +14569.jpg +21729.jpg +28454.jpg +10598.jpg +640.jpg +18931.jpg 
+9462.jpg +23253.jpg +26645.jpg +17976.jpg +29122.jpg +8863.jpg +4050.jpg +7199.jpg +28975.jpg +26044.jpg +10586.jpg +10443.jpg +543.jpg +19836.jpg +24143.jpg +10188.jpg +17795.jpg +9150.jpg +6986.jpg +20149.jpg +12478.jpg +17706.jpg +19472.jpg +29698.jpg +21140.jpg +22224.jpg +29290.jpg +8770.jpg +6104.jpg +20064.jpg +29894.jpg +28251.jpg +4554.jpg +27140.jpg +23458.jpg +29840.jpg +1785.jpg +5291.jpg +2846.jpg +22785.jpg +25287.jpg +18409.jpg +9164.jpg +5359.jpg +26600.jpg +19753.jpg +15025.jpg +20893.jpg +6902.jpg +8848.jpg +23210.jpg +11787.jpg +5315.jpg +13982.jpg +27017.jpg +23570.jpg +7886.jpg +21224.jpg +18383.jpg +26684.jpg +7977.jpg +11484.jpg +3804.jpg +27474.jpg +10408.jpg +10193.jpg +24538.jpg +16682.jpg +9697.jpg +9178.jpg +16972.jpg +24345.jpg +16959.jpg +4036.jpg +8004.jpg +25437.jpg +13721.jpg +6178.jpg +23906.jpg +3892.jpg +25290.jpg +6258.jpg +5607.jpg +18770.jpg +18772.jpg +19160.jpg +20326.jpg +19931.jpg +4376.jpg +27640.jpg +6539.jpg +17771.jpg +21816.jpg +8281.jpg +6711.jpg +21295.jpg +1649.jpg +28919.jpg +17588.jpg +14523.jpg +5915.jpg +21364.jpg +24003.jpg +17270.jpg +28282.jpg +18518.jpg +21835.jpg +22018.jpg +21227.jpg +6940.jpg +22988.jpg +4491.jpg +8768.jpg +14619.jpg +24452.jpg +21538.jpg +23416.jpg +21877.jpg +17170.jpg +24011.jpg +1335.jpg +5634.jpg +8450.jpg +5175.jpg +24236.jpg +11257.jpg +12369.jpg +2026.jpg +12406.jpg +24865.jpg +24414.jpg +5951.jpg +20741.jpg +27672.jpg +14449.jpg +25259.jpg +28518.jpg +13662.jpg +18508.jpg +2090.jpg +9645.jpg +27560.jpg +9285.jpg +14384.jpg +24350.jpg +2929.jpg +28277.jpg +5478.jpg +19215.jpg +23369.jpg +28702.jpg +856.jpg +16830.jpg +28852.jpg +19370.jpg +3236.jpg +14661.jpg +19418.jpg +17684.jpg +25483.jpg +27525.jpg +8034.jpg +7398.jpg +4261.jpg +797.jpg +10663.jpg +20042.jpg +2923.jpg +1163.jpg +25495.jpg +21617.jpg +14196.jpg +24075.jpg +20443.jpg +27108.jpg +23082.jpg +26614.jpg +14195.jpg +29505.jpg +26878.jpg +8735.jpg +23245.jpg +17969.jpg +9864.jpg +6448.jpg +8574.jpg +21797.jpg +29522.jpg +18558.jpg +1693.jpg +19257.jpg +20519.jpg +16775.jpg +1266.jpg +21054.jpg +19962.jpg +26587.jpg +26555.jpg +28644.jpg +15429.jpg +9543.jpg +19973.jpg +24065.jpg +25551.jpg +18613.jpg +27784.jpg +25312.jpg +637.jpg +14112.jpg +16073.jpg +13617.jpg +12423.jpg +6114.jpg +5160.jpg +2707.jpg +4941.jpg +11979.jpg +4836.jpg +27635.jpg +13139.jpg +2587.jpg +16022.jpg +13788.jpg +1974.jpg +21558.jpg +24794.jpg +2246.jpg +23170.jpg +16779.jpg +10407.jpg +11817.jpg +25031.jpg +27929.jpg +21173.jpg +1576.jpg +24461.jpg +23607.jpg +18038.jpg +4318.jpg +23161.jpg +17748.jpg +1255.jpg +23873.jpg +7218.jpg +21005.jpg +6792.jpg +17250.jpg +15218.jpg +7141.jpg +20584.jpg +2089.jpg +25929.jpg +29482.jpg +12884.jpg +10904.jpg +19541.jpg +1592.jpg +3647.jpg +3706.jpg +27007.jpg +9638.jpg +23616.jpg +21126.jpg +180.jpg +10085.jpg +13157.jpg +17496.jpg +13817.jpg +26561.jpg +9691.jpg +6359.jpg +29553.jpg +25817.jpg +29000.jpg +21130.jpg +12918.jpg +2047.jpg +15306.jpg +23134.jpg +29684.jpg +11060.jpg +29330.jpg +4773.jpg +11230.jpg +10448.jpg +24787.jpg +5786.jpg +5819.jpg +17284.jpg +19921.jpg +29728.jpg +3279.jpg +17381.jpg +24430.jpg +12968.jpg +1751.jpg +23988.jpg +18734.jpg +24389.jpg +7521.jpg +1891.jpg +21929.jpg +4866.jpg +21114.jpg +14934.jpg +14683.jpg +21566.jpg +5335.jpg +26336.jpg +23717.jpg +8059.jpg +29534.jpg +5503.jpg +18064.jpg +21172.jpg +10489.jpg +8212.jpg +19060.jpg +20643.jpg +5439.jpg +14151.jpg +26010.jpg +16002.jpg +17900.jpg +24370.jpg +22221.jpg +7425.jpg +10463.jpg +20255.jpg +20482.jpg +14100.jpg +28192.jpg +7086.jpg 
+7517.jpg +20855.jpg +7994.jpg +5600.jpg +13949.jpg +17660.jpg +15573.jpg +28692.jpg +21964.jpg +1787.jpg +16932.jpg +912.jpg +9639.jpg +20660.jpg +3806.jpg +10508.jpg +4873.jpg +530.jpg +29619.jpg +18828.jpg +14435.jpg +29044.jpg +1445.jpg +11961.jpg +1088.jpg +8288.jpg +22205.jpg +5508.jpg +8282.jpg +11271.jpg +26306.jpg +26348.jpg +84.jpg +14587.jpg +25788.jpg +22974.jpg +26104.jpg +27578.jpg +25241.jpg +19165.jpg +14854.jpg +16858.jpg +19346.jpg +3280.jpg +12504.jpg +17249.jpg +8047.jpg +6339.jpg +4955.jpg +17131.jpg +1558.jpg +28169.jpg +2402.jpg +17871.jpg +14317.jpg +566.jpg +24220.jpg +29965.jpg +2487.jpg +683.jpg +6231.jpg +14845.jpg +6896.jpg +23437.jpg +7381.jpg +24670.jpg +25782.jpg +19751.jpg +1431.jpg +2194.jpg +8962.jpg +11603.jpg +23104.jpg +19713.jpg +27213.jpg +15735.jpg +1198.jpg +21415.jpg +29213.jpg +11878.jpg +2115.jpg +25571.jpg +26353.jpg +9307.jpg +6103.jpg +7145.jpg +232.jpg +12766.jpg +14358.jpg +5914.jpg +8595.jpg +6184.jpg +1363.jpg +2176.jpg +27905.jpg +14374.jpg +9692.jpg +29208.jpg +18553.jpg +16455.jpg +9849.jpg +18296.jpg +28657.jpg +28854.jpg +28326.jpg +28043.jpg +22.jpg +14333.jpg +21303.jpg +1826.jpg +8998.jpg +11959.jpg +10026.jpg +13627.jpg +28527.jpg +17504.jpg +18878.jpg +19221.jpg +25249.jpg +23477.jpg +20537.jpg +11772.jpg +9444.jpg +9475.jpg +14519.jpg +16643.jpg +14238.jpg +17145.jpg +18067.jpg +4248.jpg +28924.jpg +5967.jpg +26497.jpg +26043.jpg +16352.jpg +21148.jpg +27764.jpg +21918.jpg +16788.jpg +16252.jpg +20511.jpg +8177.jpg +29934.jpg +24562.jpg +20329.jpg +16061.jpg +25098.jpg +13354.jpg +15565.jpg +17286.jpg +12063.jpg +18974.jpg +12052.jpg +22270.jpg +5987.jpg +2827.jpg +24593.jpg +22935.jpg +15345.jpg +12704.jpg +10058.jpg +21991.jpg +3923.jpg +177.jpg +26839.jpg +28592.jpg +29296.jpg +11077.jpg +13502.jpg +10574.jpg +27127.jpg +18492.jpg +1317.jpg +12479.jpg +16636.jpg +2187.jpg +9798.jpg +20325.jpg +12408.jpg +16163.jpg +10435.jpg +23732.jpg +250.jpg +14102.jpg +3413.jpg +29246.jpg +22802.jpg +14221.jpg +1139.jpg +9966.jpg +14543.jpg +5669.jpg +5666.jpg +10993.jpg +8814.jpg +28565.jpg +20810.jpg +14423.jpg +2926.jpg +22568.jpg +5206.jpg +25979.jpg +23061.jpg +25914.jpg +26406.jpg +18896.jpg +5736.jpg +11167.jpg +10967.jpg +17718.jpg +20019.jpg +17470.jpg +12693.jpg +13195.jpg +22304.jpg +29909.jpg +25637.jpg +4335.jpg +10459.jpg +15875.jpg +19068.jpg +23849.jpg +6295.jpg +26333.jpg +21957.jpg +29604.jpg +25644.jpg +22845.jpg +21762.jpg +25548.jpg +14877.jpg +1141.jpg +3769.jpg +20219.jpg +21640.jpg +1624.jpg +16945.jpg +148.jpg +20015.jpg +24904.jpg +23949.jpg +13559.jpg +7275.jpg +6148.jpg +17198.jpg +7552.jpg +12894.jpg +25446.jpg +4979.jpg +20145.jpg +1727.jpg +26598.jpg +28435.jpg +21472.jpg +13445.jpg +18344.jpg +7662.jpg +14878.jpg +19571.jpg +22729.jpg +5927.jpg +10552.jpg +10795.jpg +28409.jpg +24546.jpg +25473.jpg +1706.jpg +22639.jpg +28367.jpg +18445.jpg +14053.jpg +551.jpg +23273.jpg +9802.jpg +8149.jpg +25590.jpg +1443.jpg +16684.jpg +6499.jpg +3029.jpg +11041.jpg +9500.jpg +2432.jpg +26577.jpg +16734.jpg +20473.jpg +5029.jpg +15887.jpg +19589.jpg +11236.jpg +23785.jpg +5307.jpg +24599.jpg +25077.jpg +9409.jpg +1544.jpg +25320.jpg +23986.jpg +7821.jpg +20745.jpg +18604.jpg +11978.jpg +23976.jpg +4418.jpg +19082.jpg +16376.jpg +20703.jpg +14302.jpg +2352.jpg +4439.jpg +5711.jpg +23739.jpg +3047.jpg +2881.jpg +16152.jpg +18158.jpg +21498.jpg +1138.jpg +4935.jpg +18569.jpg +29957.jpg +4838.jpg +14975.jpg +2446.jpg +14770.jpg +2502.jpg +5816.jpg +9134.jpg +12320.jpg +24952.jpg +17224.jpg +27607.jpg +10654.jpg +10336.jpg 
+1132.jpg +12345.jpg +2752.jpg +2928.jpg +11807.jpg +4326.jpg +6540.jpg +4888.jpg +16791.jpg +29494.jpg +25848.jpg +29255.jpg +26040.jpg +9187.jpg +7967.jpg +9689.jpg +3553.jpg +3807.jpg +10487.jpg +25643.jpg +17439.jpg +23565.jpg +22748.jpg +8394.jpg +5627.jpg +7569.jpg +1881.jpg +3408.jpg +9893.jpg +4744.jpg +23008.jpg +11326.jpg +10275.jpg +29128.jpg +11706.jpg +9120.jpg +22190.jpg +23810.jpg +2205.jpg +16974.jpg +27737.jpg +8375.jpg +9915.jpg +6937.jpg +15592.jpg +1893.jpg +19854.jpg +5123.jpg +28163.jpg +28505.jpg +6371.jpg +27815.jpg +23406.jpg +488.jpg +24137.jpg +15722.jpg +26428.jpg +4152.jpg +9189.jpg +8809.jpg +27019.jpg +10826.jpg +9226.jpg +6388.jpg +12240.jpg +2386.jpg +16017.jpg +9174.jpg +29583.jpg +7656.jpg +19096.jpg +25099.jpg +16314.jpg +18466.jpg +22788.jpg +10846.jpg +2412.jpg +27813.jpg +23908.jpg +20489.jpg +22877.jpg +14940.jpg +18668.jpg +17539.jpg +1829.jpg +3734.jpg +19378.jpg +10350.jpg +16969.jpg +2995.jpg +5187.jpg +19717.jpg +9152.jpg +24666.jpg +17787.jpg +6700.jpg +27494.jpg +14832.jpg +11222.jpg +580.jpg +14041.jpg +27409.jpg +1931.jpg +19510.jpg +15354.jpg +17608.jpg +8127.jpg +22857.jpg +8667.jpg +5973.jpg +22859.jpg +23426.jpg +3141.jpg +1922.jpg +28365.jpg +1583.jpg +27308.jpg +4970.jpg +17891.jpg +12030.jpg +14559.jpg +23697.jpg +23190.jpg +2611.jpg +2888.jpg +21840.jpg +1745.jpg +19081.jpg +10262.jpg +19609.jpg +8162.jpg +25427.jpg +3038.jpg +8543.jpg +19662.jpg +25111.jpg +8955.jpg +12152.jpg +28995.jpg +2792.jpg +27966.jpg +12514.jpg +6861.jpg +25366.jpg +2991.jpg +17017.jpg +11318.jpg +7955.jpg +3915.jpg +2879.jpg +28897.jpg +24555.jpg +10742.jpg +6309.jpg +1013.jpg +334.jpg +27711.jpg +10634.jpg +3216.jpg +9293.jpg +25684.jpg +10201.jpg +26464.jpg +11658.jpg +24979.jpg +11362.jpg +13566.jpg +17760.jpg +10516.jpg +25746.jpg +26884.jpg +3847.jpg +19145.jpg +7294.jpg +25562.jpg +24791.jpg +13180.jpg +9670.jpg +7462.jpg +29420.jpg +27158.jpg +6853.jpg +29135.jpg +29757.jpg +28858.jpg +27061.jpg +10705.jpg +14066.jpg +2292.jpg +5725.jpg +15526.jpg +22076.jpg +8072.jpg +14090.jpg +8502.jpg +7867.jpg +19122.jpg +25192.jpg +22341.jpg +15815.jpg +17799.jpg +27596.jpg +3106.jpg +13531.jpg +22366.jpg +19181.jpg +2553.jpg +6329.jpg +12604.jpg +15379.jpg +7475.jpg +22376.jpg +25509.jpg +18181.jpg +27059.jpg +12045.jpg +28108.jpg +461.jpg +20707.jpg +5941.jpg +10441.jpg +6990.jpg +9442.jpg +19452.jpg +10940.jpg +26055.jpg +4690.jpg +15113.jpg +7970.jpg +4480.jpg +29769.jpg +26665.jpg +18069.jpg +12422.jpg +24292.jpg +24443.jpg +12081.jpg +4869.jpg +14023.jpg +5282.jpg +23255.jpg +29148.jpg +27198.jpg +3201.jpg +24287.jpg +18568.jpg +1208.jpg +1941.jpg +27021.jpg +12108.jpg +23934.jpg +19815.jpg +6841.jpg +18098.jpg +14905.jpg +29605.jpg +582.jpg +21860.jpg +17074.jpg +13837.jpg +10195.jpg +16931.jpg +21580.jpg +6651.jpg +27103.jpg +24441.jpg +21066.jpg +13269.jpg +12971.jpg +16772.jpg +10844.jpg +17989.jpg +26634.jpg +7125.jpg +2826.jpg +19706.jpg +29926.jpg +11119.jpg +29477.jpg +17827.jpg +8989.jpg +4400.jpg +6141.jpg +13114.jpg +866.jpg +13899.jpg +28561.jpg +3454.jpg +14441.jpg +1842.jpg +21790.jpg +809.jpg +12775.jpg +19591.jpg +12355.jpg +25604.jpg +23511.jpg +17605.jpg +18163.jpg +29567.jpg +18260.jpg +195.jpg +13170.jpg +24353.jpg +9961.jpg +24954.jpg +7899.jpg +19083.jpg +17438.jpg +10080.jpg +11156.jpg +1997.jpg +15925.jpg +14450.jpg +27847.jpg +67.jpg +22944.jpg +15760.jpg +20601.jpg +19761.jpg +10719.jpg +11124.jpg +22782.jpg +13635.jpg +27871.jpg +11329.jpg +11714.jpg +17219.jpg +29029.jpg +15437.jpg +10898.jpg +13820.jpg +20529.jpg +2427.jpg 
+1531.jpg +24604.jpg +8275.jpg +13193.jpg +12538.jpg +27144.jpg +22766.jpg +28970.jpg +2681.jpg +1735.jpg +1318.jpg +28004.jpg +23844.jpg +15252.jpg +18145.jpg +16800.jpg +13520.jpg +3542.jpg +15456.jpg +15961.jpg +12720.jpg +15263.jpg +9985.jpg +28253.jpg +17126.jpg +12649.jpg +23470.jpg +22837.jpg +24437.jpg +26127.jpg +15103.jpg +25054.jpg +29839.jpg +21606.jpg +26473.jpg +5293.jpg +1105.jpg +18258.jpg +16185.jpg +8775.jpg +28539.jpg +14305.jpg +28094.jpg +2596.jpg +7018.jpg +22843.jpg +11873.jpg +25074.jpg +29058.jpg +3653.jpg +14875.jpg +12655.jpg +3212.jpg +11792.jpg +13732.jpg +4502.jpg +10641.jpg +5390.jpg +10060.jpg +16963.jpg +16519.jpg +3182.jpg +17352.jpg +20836.jpg +12060.jpg +344.jpg +29601.jpg +20152.jpg +20487.jpg +20302.jpg +17421.jpg +11664.jpg +24924.jpg +24111.jpg +14831.jpg +3774.jpg +10885.jpg +15365.jpg +14630.jpg +12217.jpg +12676.jpg +21773.jpg +20021.jpg +11725.jpg +25273.jpg +10982.jpg +23581.jpg +20900.jpg +18104.jpg +15677.jpg +18718.jpg +20445.jpg +26283.jpg +3143.jpg +12957.jpg +6491.jpg +21152.jpg +27175.jpg +7339.jpg +24290.jpg +11095.jpg +10784.jpg +7331.jpg +5789.jpg +19850.jpg +4320.jpg +15298.jpg +24363.jpg +13225.jpg +18941.jpg +12860.jpg +20735.jpg +15880.jpg +4295.jpg +10723.jpg +11097.jpg +10908.jpg +12723.jpg +22363.jpg +12721.jpg +5992.jpg +21113.jpg +29677.jpg +19232.jpg +21376.jpg +17909.jpg +16925.jpg +18336.jpg +5591.jpg +14215.jpg +25275.jpg +11690.jpg +28767.jpg +28668.jpg +12020.jpg +8576.jpg +25498.jpg +64.jpg +22708.jpg +15975.jpg +10123.jpg +6085.jpg +4455.jpg +22697.jpg +28296.jpg +27380.jpg +5597.jpg +25503.jpg +18231.jpg +25344.jpg +11868.jpg +2117.jpg +1124.jpg +22596.jpg +23294.jpg +27724.jpg +28039.jpg +4082.jpg +26.jpg +2957.jpg +22825.jpg +10958.jpg +19205.jpg +4412.jpg +27602.jpg +13667.jpg +6947.jpg +29560.jpg +20464.jpg +25338.jpg +8925.jpg +18144.jpg +10135.jpg +27167.jpg +13436.jpg +7201.jpg +9929.jpg +27292.jpg +13232.jpg +11452.jpg +26260.jpg +18407.jpg +21513.jpg +17543.jpg +12265.jpg +4144.jpg +21037.jpg +23832.jpg +19047.jpg +14236.jpg +17158.jpg +10266.jpg +14432.jpg +8407.jpg +208.jpg +2621.jpg +21125.jpg +25364.jpg +819.jpg +2010.jpg +14749.jpg +20615.jpg +29237.jpg +12987.jpg +18969.jpg +29469.jpg +4302.jpg +13557.jpg +18080.jpg +2975.jpg +16195.jpg +26949.jpg +28247.jpg +3565.jpg +18249.jpg +23401.jpg +11206.jpg +5609.jpg +12090.jpg +5621.jpg +15046.jpg +2336.jpg +26093.jpg +24744.jpg +7410.jpg +28388.jpg +20300.jpg +571.jpg +8210.jpg +28265.jpg +17914.jpg +19624.jpg +29371.jpg +29892.jpg +17091.jpg +12250.jpg +3605.jpg +16545.jpg +7744.jpg +4421.jpg +17578.jpg +2454.jpg +13036.jpg +27065.jpg +15468.jpg +29617.jpg +28385.jpg +18079.jpg +4004.jpg +17205.jpg +476.jpg +26215.jpg +9986.jpg +2328.jpg +7325.jpg +15872.jpg +4954.jpg +20594.jpg +22654.jpg +21290.jpg +4861.jpg +20110.jpg +6244.jpg +20177.jpg +661.jpg +8351.jpg +22762.jpg +15738.jpg +19339.jpg +27930.jpg +21523.jpg +17492.jpg +11737.jpg +17623.jpg +18294.jpg +3673.jpg +11353.jpg +18753.jpg +7328.jpg +7879.jpg +28161.jpg +10520.jpg +3117.jpg +4654.jpg +8255.jpg +4569.jpg +13709.jpg +26124.jpg +24725.jpg +11529.jpg +3715.jpg +5969.jpg +9452.jpg +28849.jpg +9896.jpg +18802.jpg +26629.jpg +11954.jpg +8870.jpg +29192.jpg +26166.jpg +18670.jpg +4255.jpg +6447.jpg +6764.jpg +18109.jpg +12714.jpg +15316.jpg +8976.jpg +13465.jpg +7262.jpg +20468.jpg +24731.jpg +15630.jpg +15157.jpg +7981.jpg +26792.jpg +10593.jpg +17476.jpg +3353.jpg +11632.jpg +6604.jpg +7434.jpg +578.jpg +15716.jpg +7148.jpg +14344.jpg +21842.jpg +6444.jpg +17810.jpg +25041.jpg +27821.jpg 
+27212.jpg +7456.jpg +11721.jpg +12489.jpg +16578.jpg +13289.jpg +8129.jpg +24746.jpg +12000.jpg +28406.jpg +16638.jpg +8362.jpg +4622.jpg +22861.jpg +18179.jpg +5023.jpg +5920.jpg +16520.jpg +3061.jpg +19823.jpg +28567.jpg +4498.jpg +10511.jpg +15063.jpg +28392.jpg +11106.jpg +23561.jpg +25088.jpg +17542.jpg +25812.jpg +12295.jpg +13469.jpg +14334.jpg +20957.jpg +11226.jpg +108.jpg +16101.jpg +16760.jpg +9511.jpg +25205.jpg +29483.jpg +25286.jpg +26693.jpg +1898.jpg +1164.jpg +5330.jpg +17172.jpg +9901.jpg +12486.jpg +21104.jpg +20923.jpg +19102.jpg +5448.jpg +13363.jpg +4593.jpg +11253.jpg +21479.jpg +21612.jpg +16700.jpg +26372.jpg +20556.jpg +8711.jpg +3878.jpg +21806.jpg +19395.jpg +10812.jpg +9065.jpg +17356.jpg +21947.jpg +16783.jpg +7571.jpg +18502.jpg +1328.jpg +26009.jpg +3943.jpg +3738.jpg +12021.jpg +836.jpg +26192.jpg +28040.jpg +1684.jpg +28452.jpg +17514.jpg +18154.jpg +21599.jpg +19072.jpg +630.jpg +14111.jpg +13132.jpg +9481.jpg +23640.jpg +19570.jpg +5210.jpg +24214.jpg +15784.jpg +173.jpg +24343.jpg +5214.jpg +20582.jpg +6725.jpg +21914.jpg +26777.jpg +15057.jpg +15454.jpg +3568.jpg +12370.jpg +25753.jpg +27314.jpg +10831.jpg +11764.jpg +6076.jpg +27958.jpg +28376.jpg +28659.jpg +29938.jpg +21273.jpg +19635.jpg +4996.jpg +7789.jpg +11652.jpg +28415.jpg +1283.jpg +12526.jpg +13235.jpg +14754.jpg +22007.jpg +23038.jpg +4940.jpg +24795.jpg +15159.jpg +1401.jpg +16690.jpg +14107.jpg +16929.jpg +23208.jpg +66.jpg +6869.jpg +16024.jpg +12359.jpg +24595.jpg +28869.jpg +21349.jpg +11389.jpg +7808.jpg +20118.jpg +21220.jpg +10442.jpg +22487.jpg +11283.jpg +18149.jpg +15097.jpg +4638.jpg +6403.jpg +586.jpg +28547.jpg +5360.jpg +7741.jpg +28387.jpg +11931.jpg +22572.jpg +26544.jpg +4079.jpg +23672.jpg +28058.jpg +2876.jpg +11950.jpg +7369.jpg +13111.jpg +5363.jpg +9854.jpg +24905.jpg +3463.jpg +11125.jpg +20222.jpg +4624.jpg +7929.jpg +7853.jpg +22641.jpg +12166.jpg +20686.jpg +29932.jpg +14313.jpg +6716.jpg +28425.jpg +4632.jpg +17907.jpg +19603.jpg +7459.jpg +22250.jpg +23408.jpg +27664.jpg +19657.jpg +18768.jpg +1095.jpg +12487.jpg +23466.jpg +16479.jpg +28682.jpg +9200.jpg +25558.jpg +13549.jpg +28224.jpg +8550.jpg +4892.jpg +27142.jpg +28619.jpg +24277.jpg +10106.jpg +23451.jpg +10199.jpg +2788.jpg +23218.jpg +18150.jpg +18210.jpg +19754.jpg +10345.jpg +29211.jpg +29010.jpg +2918.jpg +230.jpg +29456.jpg +19425.jpg +14247.jpg +27627.jpg +22582.jpg +25299.jpg +22500.jpg +26849.jpg +6056.jpg +11057.jpg +21922.jpg +14383.jpg +6542.jpg +17024.jpg +5150.jpg +17778.jpg +27713.jpg +1286.jpg +27109.jpg +7097.jpg +16244.jpg +28280.jpg +19284.jpg +1250.jpg +22462.jpg +3651.jpg +25318.jpg +16947.jpg +29683.jpg +29008.jpg +6297.jpg +27521.jpg +8163.jpg +10377.jpg +28708.jpg +861.jpg +22585.jpg +13437.jpg +8850.jpg +17434.jpg +9282.jpg +18880.jpg +6014.jpg +28368.jpg +16484.jpg +16667.jpg +2394.jpg +22305.jpg +13586.jpg +7336.jpg +14346.jpg +18386.jpg +1540.jpg +18270.jpg +6494.jpg +13154.jpg +28950.jpg +15789.jpg +13879.jpg +16555.jpg +20607.jpg +18076.jpg +1024.jpg +26039.jpg +9743.jpg +26445.jpg +8643.jpg +13070.jpg +22881.jpg +24612.jpg +23893.jpg +2739.jpg +19626.jpg +11157.jpg +19330.jpg +28109.jpg +14804.jpg +27481.jpg +27253.jpg +17410.jpg +21087.jpg +13642.jpg +25659.jpg +2173.jpg +29429.jpg +15126.jpg +29104.jpg +2958.jpg +9296.jpg +14838.jpg +15683.jpg +7477.jpg +21052.jpg +20655.jpg +19307.jpg +17211.jpg +11028.jpg +8312.jpg +1792.jpg +29339.jpg +3496.jpg +8069.jpg +5183.jpg +15250.jpg +13143.jpg +22812.jpg +20343.jpg +21559.jpg +13877.jpg +21388.jpg +7224.jpg +9552.jpg 
+29525.jpg +6914.jpg +1796.jpg +12627.jpg +27364.jpg +2312.jpg +8080.jpg +28654.jpg +18355.jpg +23125.jpg +5194.jpg +24877.jpg +3925.jpg +18089.jpg +22870.jpg +28478.jpg +3381.jpg +22411.jpg +19986.jpg +2221.jpg +12500.jpg +20207.jpg +9469.jpg +6363.jpg +12592.jpg +25422.jpg +21440.jpg +2209.jpg +29351.jpg +43.jpg +21802.jpg +27432.jpg +7623.jpg +18785.jpg +19618.jpg +3648.jpg +25042.jpg +11276.jpg +3649.jpg +14412.jpg +24585.jpg +11006.jpg +27544.jpg +23615.jpg +324.jpg +5547.jpg +16131.jpg +21135.jpg +21238.jpg +236.jpg +12482.jpg +28111.jpg +26673.jpg +17978.jpg +27040.jpg +25935.jpg +25787.jpg +7393.jpg +15186.jpg +13252.jpg +17267.jpg +5734.jpg +11789.jpg +22916.jpg +12544.jpg +13003.jpg +26275.jpg +4759.jpg +19356.jpg +2869.jpg +4629.jpg +23370.jpg +23566.jpg +13955.jpg +11955.jpg +6358.jpg +24942.jpg +26178.jpg +17531.jpg +21588.jpg +17395.jpg +29059.jpg +6385.jpg +16691.jpg +18934.jpg +20327.jpg +15778.jpg +25652.jpg +27459.jpg +23605.jpg +6156.jpg +21424.jpg +22842.jpg +15561.jpg +15967.jpg +2248.jpg +4588.jpg +1673.jpg +27985.jpg +1840.jpg +470.jpg +13765.jpg +15469.jpg +14243.jpg +21116.jpg +21287.jpg +19872.jpg +24743.jpg +4959.jpg +25614.jpg +25661.jpg +10431.jpg +9831.jpg +9234.jpg +9659.jpg +14730.jpg +17811.jpg +27854.jpg +15358.jpg +21745.jpg +8344.jpg +20065.jpg +24419.jpg +464.jpg +13840.jpg +21150.jpg +19084.jpg +20358.jpg +1199.jpg +20281.jpg +16159.jpg +12613.jpg +9030.jpg +3199.jpg +2327.jpg +22234.jpg +11277.jpg +4141.jpg +5268.jpg +28599.jpg +4645.jpg +21921.jpg +29022.jpg +25362.jpg +10358.jpg +8750.jpg +22011.jpg +24004.jpg +27568.jpg +5474.jpg +7763.jpg +6400.jpg +17803.jpg +17906.jpg +948.jpg +18842.jpg +26639.jpg +1489.jpg +970.jpg +3275.jpg +13895.jpg +1334.jpg +24188.jpg +27972.jpg +606.jpg +18976.jpg +3627.jpg +21350.jpg +7668.jpg +3066.jpg +22958.jpg +6188.jpg +15129.jpg +26452.jpg +25113.jpg +16432.jpg +20134.jpg +12840.jpg +28553.jpg +4976.jpg +7576.jpg +4627.jpg +8174.jpg +19287.jpg +23757.jpg +13205.jpg +15043.jpg +14403.jpg +11795.jpg +26800.jpg +6987.jpg +25749.jpg +17860.jpg +3803.jpg +12003.jpg +49.jpg +21109.jpg +12374.jpg +1118.jpg +21428.jpg +6298.jpg +26365.jpg +27855.jpg +2939.jpg +24281.jpg +15747.jpg +3796.jpg +16648.jpg +13016.jpg +18360.jpg +6143.jpg +9677.jpg +5903.jpg +9965.jpg +15465.jpg +540.jpg +11147.jpg +15835.jpg +28847.jpg +23105.jpg +11123.jpg +11442.jpg +20796.jpg +29199.jpg +8393.jpg +8995.jpg +2037.jpg +5686.jpg +8689.jpg +7649.jpg +3350.jpg +25303.jpg +24689.jpg +13186.jpg +5230.jpg +20120.jpg +23291.jpg +25026.jpg +26493.jpg +22013.jpg +21204.jpg +11110.jpg +8597.jpg +15265.jpg +12155.jpg +10117.jpg +33.jpg +2847.jpg +9332.jpg +27684.jpg +9047.jpg +7130.jpg +28604.jpg +24339.jpg +10790.jpg +2019.jpg +14039.jpg +16600.jpg +29885.jpg +234.jpg +24289.jpg +16404.jpg +4507.jpg +611.jpg +21872.jpg +25002.jpg +28205.jpg +10517.jpg +5250.jpg +5157.jpg +5164.jpg +1849.jpg +6412.jpg +24298.jpg +2895.jpg +15585.jpg +15478.jpg +15003.jpg +6732.jpg +2597.jpg +28951.jpg +28324.jpg +18414.jpg +15865.jpg +21531.jpg +3049.jpg +4745.jpg +8451.jpg +11509.jpg +28891.jpg +9111.jpg +257.jpg +11122.jpg +28356.jpg +14213.jpg +3547.jpg +26011.jpg +9834.jpg +26096.jpg +5217.jpg +5781.jpg +5787.jpg +27742.jpg +4397.jpg +5546.jpg +18428.jpg +18451.jpg +4158.jpg +2113.jpg +2308.jpg +12736.jpg +3710.jpg +26036.jpg +22518.jpg +3714.jpg +9840.jpg +27610.jpg +27117.jpg +1452.jpg +9595.jpg +1254.jpg +22756.jpg +6647.jpg +3701.jpg +24775.jpg +15281.jpg +5020.jpg +20631.jpg +18576.jpg +18221.jpg +23644.jpg +8698.jpg +657.jpg +19892.jpg +13864.jpg +4708.jpg 
+29571.jpg +4661.jpg +7014.jpg +2574.jpg +2201.jpg +25152.jpg +29731.jpg +9664.jpg +888.jpg +28675.jpg +16430.jpg +7009.jpg +21391.jpg +11376.jpg +16777.jpg +6209.jpg +24763.jpg +1651.jpg +12546.jpg +19413.jpg +17487.jpg +10071.jpg +8229.jpg +4914.jpg +5129.jpg +24630.jpg +22012.jpg +23979.jpg +17451.jpg +3586.jpg +18208.jpg +17524.jpg +6090.jpg +17415.jpg +17014.jpg +14865.jpg +8721.jpg +25007.jpg +12415.jpg +22791.jpg +2563.jpg +15780.jpg +16820.jpg +26862.jpg +25852.jpg +25967.jpg +2413.jpg +17252.jpg +25345.jpg +1614.jpg +7989.jpg +10601.jpg +6361.jpg +2825.jpg +20639.jpg +184.jpg +2459.jpg +2381.jpg +22422.jpg +18841.jpg +24211.jpg +971.jpg +7632.jpg +9905.jpg +13064.jpg +2561.jpg +7291.jpg +4704.jpg +5039.jpg +12988.jpg +19349.jpg +12955.jpg +18957.jpg +19951.jpg +20853.jpg +15384.jpg +1030.jpg +15751.jpg +20944.jpg +5618.jpg +7123.jpg +15245.jpg +7818.jpg +20230.jpg +15509.jpg +9054.jpg +1519.jpg +14192.jpg +18831.jpg +594.jpg +28981.jpg +23602.jpg +7274.jpg +6497.jpg +2937.jpg +27761.jpg +16449.jpg +8399.jpg +5956.jpg +27942.jpg +21096.jpg +14991.jpg +9569.jpg +14463.jpg +27119.jpg +11893.jpg +11808.jpg +26602.jpg +9032.jpg +16316.jpg +8730.jpg +16231.jpg +26120.jpg +24559.jpg +515.jpg +14941.jpg +14329.jpg +6233.jpg +5610.jpg +23763.jpg +2434.jpg +14785.jpg +4294.jpg +13819.jpg +21714.jpg +7142.jpg +10861.jpg +20809.jpg +1115.jpg +736.jpg +45.jpg +29463.jpg +20023.jpg +5326.jpg +4830.jpg +26349.jpg +4531.jpg +4146.jpg +15536.jpg +21604.jpg +29748.jpg +27625.jpg +8230.jpg +22930.jpg +8871.jpg +3124.jpg +20463.jpg +20998.jpg +727.jpg +5446.jpg +13028.jpg +21329.jpg +29425.jpg +21730.jpg +20817.jpg +23058.jpg +1009.jpg +27490.jpg +3941.jpg +20350.jpg +8731.jpg +22425.jpg +10752.jpg +28153.jpg +16617.jpg +5724.jpg +11312.jpg +23624.jpg +13077.jpg +11245.jpg +6787.jpg +1955.jpg +12779.jpg +2440.jpg +15675.jpg +24837.jpg +24729.jpg +14843.jpg +15136.jpg +20320.jpg +4659.jpg +5190.jpg +5853.jpg +24177.jpg +11415.jpg +13946.jpg +4845.jpg +29857.jpg +601.jpg +1214.jpg +4059.jpg +16262.jpg +13203.jpg +21081.jpg +2749.jpg +21342.jpg +20093.jpg +13398.jpg +2161.jpg +36.jpg +20629.jpg +4730.jpg +15226.jpg +5692.jpg +26484.jpg +13713.jpg +18429.jpg +4776.jpg +233.jpg +24933.jpg +22745.jpg +18124.jpg +27445.jpg +18886.jpg +16878.jpg +17134.jpg +9685.jpg +2171.jpg +24859.jpg +2554.jpg +4460.jpg +642.jpg +2342.jpg +20533.jpg +27514.jpg +7511.jpg +9953.jpg +8466.jpg +18747.jpg +2977.jpg +6860.jpg +19468.jpg +13236.jpg +10253.jpg +17549.jpg +21751.jpg +11361.jpg +5398.jpg +27722.jpg +17844.jpg +10227.jpg +3221.jpg +27537.jpg +6930.jpg +16384.jpg +26974.jpg +1394.jpg +15820.jpg +19470.jpg +28667.jpg +16409.jpg +1238.jpg +6326.jpg +12770.jpg +29585.jpg +1709.jpg +7216.jpg +21567.jpg +20778.jpg +24118.jpg +15933.jpg +9936.jpg +11181.jpg +15970.jpg +12223.jpg +834.jpg +21642.jpg +9663.jpg +5566.jpg +14929.jpg +5605.jpg +20286.jpg +15308.jpg +4115.jpg +16023.jpg +4754.jpg +13624.jpg +14380.jpg +22200.jpg +22365.jpg +6317.jpg +9853.jpg +9400.jpg +27706.jpg +27441.jpg +14551.jpg +11477.jpg +22614.jpg +610.jpg +28201.jpg +21411.jpg +3312.jpg +17783.jpg +4768.jpg +19550.jpg +7626.jpg +11582.jpg +28194.jpg +2023.jpg +15297.jpg +5355.jpg +20894.jpg +21143.jpg +6934.jpg +2901.jpg +29153.jpg +21752.jpg +3777.jpg +28330.jpg +27104.jpg +22280.jpg +19305.jpg +24742.jpg +10273.jpg +9160.jpg +27959.jpg +18946.jpg +374.jpg +18666.jpg +9149.jpg +27741.jpg +7806.jpg +3598.jpg +24901.jpg +28214.jpg +22282.jpg +15146.jpg +3768.jpg +13268.jpg +5771.jpg +22826.jpg +12228.jpg +20983.jpg +4728.jpg +18082.jpg +1167.jpg 
+15515.jpg +2436.jpg +3367.jpg +20719.jpg +3870.jpg +21490.jpg +9921.jpg +29405.jpg +13833.jpg +22317.jpg +24500.jpg +12507.jpg +2745.jpg +4023.jpg +10076.jpg +8200.jpg +11444.jpg +187.jpg +4449.jpg +16049.jpg +15273.jpg +24758.jpg +17973.jpg +10731.jpg +4251.jpg +24203.jpg +24944.jpg +21976.jpg +13110.jpg +11722.jpg +12169.jpg +17021.jpg +14109.jpg +7197.jpg +10344.jpg +1120.jpg +12104.jpg +19705.jpg +22898.jpg +9699.jpg +28769.jpg +28418.jpg +20444.jpg +2055.jpg +20654.jpg +25218.jpg +5300.jpg +3386.jpg +11456.jpg +13340.jpg +11460.jpg +27787.jpg +2520.jpg +26193.jpg +20888.jpg +12420.jpg +770.jpg +19426.jpg +15318.jpg +11526.jpg +3305.jpg +2367.jpg +12605.jpg +9851.jpg +26741.jpg +21292.jpg +23444.jpg +13290.jpg +27137.jpg +24611.jpg +512.jpg +23497.jpg +20017.jpg +26006.jpg +11648.jpg +25888.jpg +3099.jpg +27063.jpg +9071.jpg +11385.jpg +22414.jpg +22208.jpg +4631.jpg +19392.jpg +11853.jpg +10681.jpg +7543.jpg +15929.jpg +13061.jpg +4715.jpg +15251.jpg +16220.jpg +17591.jpg +2738.jpg +23030.jpg +22239.jpg +7618.jpg +10170.jpg +3158.jpg +5613.jpg +5888.jpg +7839.jpg +2175.jpg +4680.jpg +15819.jpg +28637.jpg +8737.jpg +16561.jpg +14460.jpg +4434.jpg +23630.jpg +12282.jpg +5919.jpg +2610.jpg +2313.jpg +28026.jpg +4739.jpg +19692.jpg +29808.jpg +12806.jpg +16208.jpg +26408.jpg +20146.jpg +4263.jpg +21641.jpg +25830.jpg +14928.jpg +16382.jpg +16674.jpg +16257.jpg +19094.jpg +21868.jpg +20486.jpg +23885.jpg +14337.jpg +25442.jpg +24893.jpg +29331.jpg +28218.jpg +15333.jpg +19062.jpg +11887.jpg +28842.jpg +19235.jpg +14938.jpg +18365.jpg +6723.jpg +17586.jpg +25837.jpg +18034.jpg +5471.jpg +6912.jpg +841.jpg +26440.jpg +15332.jpg +18371.jpg +11264.jpg +22781.jpg +29700.jpg +1973.jpg +5830.jpg +9571.jpg +29436.jpg +24920.jpg +15863.jpg +22044.jpg +14004.jpg +13105.jpg +14819.jpg +16890.jpg +26417.jpg +13618.jpg +812.jpg +17800.jpg +20002.jpg +23118.jpg +171.jpg +2258.jpg +6238.jpg +27524.jpg +22631.jpg +15697.jpg +24577.jpg +27667.jpg +7536.jpg +4312.jpg +23048.jpg +7301.jpg +19958.jpg +17668.jpg +15195.jpg +10133.jpg +17058.jpg +9445.jpg +17877.jpg +12513.jpg +14736.jpg +21145.jpg +5021.jpg +16998.jpg +8187.jpg +6690.jpg +6605.jpg +10468.jpg +26630.jpg +18922.jpg +13246.jpg +15026.jpg +24315.jpg +21737.jpg +12739.jpg +1517.jpg +27746.jpg +5918.jpg +2072.jpg +5198.jpg +7607.jpg +14153.jpg +9503.jpg +23297.jpg +23475.jpg +414.jpg +27520.jpg +4170.jpg +19636.jpg +20419.jpg +21271.jpg +29944.jpg +14685.jpg +21060.jpg +28711.jpg +7221.jpg +28913.jpg +23076.jpg +20916.jpg +2000.jpg +25955.jpg +8208.jpg +16935.jpg +13308.jpg +14268.jpg +10003.jpg +9070.jpg +13544.jpg +13775.jpg +18873.jpg +12400.jpg +2223.jpg +23706.jpg +27371.jpg +10978.jpg +20142.jpg +14249.jpg +21577.jpg +10880.jpg +12789.jpg +9456.jpg +21322.jpg +6868.jpg +24619.jpg +20317.jpg +27443.jpg +7310.jpg +8474.jpg +26494.jpg +6375.jpg +24878.jpg +27242.jpg +2449.jpg +4398.jpg +24891.jpg +25719.jpg +23611.jpg +1038.jpg +16032.jpg +20672.jpg +8483.jpg +2750.jpg +1227.jpg +1705.jpg +16006.jpg +24221.jpg +11747.jpg +21397.jpg +21801.jpg +25147.jpg +3031.jpg +11170.jpg +16547.jpg +17903.jpg +28978.jpg +26289.jpg +3024.jpg +23978.jpg +801.jpg +27554.jpg +27623.jpg +8361.jpg +18068.jpg +12952.jpg +16866.jpg +21516.jpg +3987.jpg +3946.jpg +21050.jpg +10882.jpg +2267.jpg +27341.jpg +13499.jpg +10023.jpg +7864.jpg +29036.jpg +13137.jpg +6782.jpg +6820.jpg +21895.jpg +28185.jpg +10421.jpg +5991.jpg +9478.jpg +4980.jpg +28050.jpg +5966.jpg +20729.jpg +4178.jpg +2732.jpg +26917.jpg +13989.jpg +20826.jpg +28719.jpg +4227.jpg +11522.jpg +26952.jpg 
+5757.jpg +7158.jpg +11379.jpg +25981.jpg +18671.jpg +27995.jpg +4847.jpg +5281.jpg +10143.jpg +28612.jpg +18273.jpg +18645.jpg +23430.jpg +20263.jpg +8142.jpg +25613.jpg +18455.jpg +8683.jpg +19353.jpg +27489.jpg +12824.jpg +12639.jpg +16784.jpg +29906.jpg +14992.jpg +1762.jpg +26674.jpg +17447.jpg +27312.jpg +21503.jpg +9176.jpg +21989.jpg +18378.jpg +14149.jpg +12118.jpg +7326.jpg +3119.jpg +1899.jpg +4344.jpg +24167.jpg +20920.jpg +15369.jpg +8469.jpg +12823.jpg +12964.jpg +24279.jpg +24929.jpg +2357.jpg +15680.jpg +19135.jpg +27322.jpg +6263.jpg +21759.jpg +9276.jpg +5549.jpg +28116.jpg +21879.jpg +21360.jpg +4958.jpg +7591.jpg +21799.jpg +20592.jpg +20569.jpg +19033.jpg +7219.jpg +26612.jpg +5003.jpg +9385.jpg +12696.jpg +22993.jpg +10711.jpg +13307.jpg +18562.jpg +1106.jpg +10355.jpg +20866.jpg +17821.jpg +25143.jpg +24325.jpg +24529.jpg +10332.jpg +2311.jpg +1069.jpg +12928.jpg +13448.jpg +15609.jpg +26742.jpg +10582.jpg +22597.jpg +14796.jpg +13744.jpg +16886.jpg +4249.jpg +24418.jpg +450.jpg +17770.jpg +24257.jpg +6568.jpg +14615.jpg +10120.jpg +6777.jpg +4415.jpg +15446.jpg +2411.jpg +17786.jpg +22100.jpg +17016.jpg +3474.jpg +5035.jpg +4366.jpg +19415.jpg +3027.jpg +7184.jpg +3961.jpg +16298.jpg +28071.jpg +5460.jpg +24112.jpg +28407.jpg +22423.jpg +2702.jpg +1322.jpg +4837.jpg +24516.jpg +6211.jpg +23707.jpg +12615.jpg +23377.jpg +10558.jpg +17247.jpg +428.jpg +19020.jpg +100.jpg +431.jpg +5668.jpg +23857.jpg +12813.jpg +8598.jpg +9341.jpg +20786.jpg +2724.jpg +231.jpg +21495.jpg +12232.jpg +17339.jpg +24182.jpg +6022.jpg +2227.jpg +16235.jpg +22590.jpg +471.jpg +23361.jpg +10947.jpg +14855.jpg +47.jpg +27723.jpg +21978.jpg +907.jpg +16558.jpg +23731.jpg +14999.jpg +19107.jpg +12710.jpg +2443.jpg +21525.jpg +20961.jpg +28984.jpg +13831.jpg +13847.jpg +26073.jpg +24886.jpg +28228.jpg +4289.jpg +28278.jpg +14184.jpg +3723.jpg +7599.jpg +14713.jpg +2251.jpg +2447.jpg +1511.jpg +3978.jpg +8381.jpg +4359.jpg +4589.jpg +29165.jpg +9534.jpg +65.jpg +8413.jpg +22014.jpg +8823.jpg +752.jpg +16900.jpg +20004.jpg +8618.jpg +17670.jpg +5402.jpg +9252.jpg +27552.jpg +9335.jpg +17272.jpg +26797.jpg +12176.jpg +20050.jpg +5438.jpg +203.jpg +2976.jpg +16529.jpg +6618.jpg +1081.jpg +4864.jpg +13509.jpg +25160.jpg +4313.jpg +3820.jpg +16556.jpg +15998.jpg +1609.jpg +27036.jpg +19716.jpg +23545.jpg +6761.jpg +6610.jpg +8949.jpg +7347.jpg +6132.jpg +29258.jpg +12588.jpg +25523.jpg +21021.jpg +13131.jpg +1977.jpg +4497.jpg +20161.jpg +29589.jpg +28698.jpg +3218.jpg +22925.jpg +14206.jpg +2672.jpg +8866.jpg +7917.jpg +27011.jpg +3619.jpg +2496.jpg +15008.jpg +9278.jpg +15476.jpg +18559.jpg +10498.jpg +12577.jpg +19941.jpg +15144.jpg +8932.jpg +29180.jpg +1001.jpg +19475.jpg +25622.jpg +29394.jpg +16472.jpg +16503.jpg +23991.jpg +15671.jpg +23852.jpg +4186.jpg +3976.jpg +13370.jpg +15410.jpg +6324.jpg +14273.jpg +21298.jpg +14920.jpg +28052.jpg +12390.jpg +28399.jpg +20056.jpg +7539.jpg +12401.jpg +12974.jpg +5480.jpg +16887.jpg +23540.jpg +388.jpg +26439.jpg +20361.jpg +3843.jpg +15628.jpg +1049.jpg +10035.jpg +13035.jpg +12520.jpg +18232.jpg +14282.jpg +10237.jpg +25987.jpg +28038.jpg +4035.jpg +7481.jpg +7447.jpg +17885.jpg +4247.jpg +24696.jpg +930.jpg +15850.jpg +15996.jpg +18852.jpg +13123.jpg +12204.jpg +23152.jpg +2836.jpg +8959.jpg +14352.jpg +4641.jpg +20531.jpg +14083.jpg +9360.jpg +19819.jpg +1845.jpg +23060.jpg +9300.jpg +2220.jpg +20905.jpg +14611.jpg +9265.jpg +8402.jpg +28650.jpg +11784.jpg +8078.jpg +8510.jpg +13414.jpg +26018.jpg +18226.jpg +28256.jpg +24640.jpg +12498.jpg 
+1841.jpg +12576.jpg +27337.jpg +19093.jpg +20685.jpg +17365.jpg +23550.jpg +9705.jpg +26913.jpg +19302.jpg +24127.jpg +16854.jpg +6028.jpg +26972.jpg +23193.jpg +22328.jpg +25090.jpg +29158.jpg +24831.jpg +22478.jpg +19990.jpg +14707.jpg +3687.jpg +27772.jpg +5947.jpg +17986.jpg +16705.jpg +3573.jpg +27851.jpg +8308.jpg +16066.jpg +10985.jpg +1017.jpg +8012.jpg +18557.jpg +26307.jpg +11491.jpg +19917.jpg +6575.jpg +5171.jpg +1876.jpg +27974.jpg +21940.jpg +14848.jpg +8465.jpg +29995.jpg +29772.jpg +18295.jpg +24584.jpg +1302.jpg +14969.jpg +25892.jpg +27924.jpg +24600.jpg +2950.jpg +19779.jpg +18135.jpg +29262.jpg +14204.jpg +21748.jpg +28204.jpg +3984.jpg +4457.jpg +6971.jpg +6059.jpg +19579.jpg +22329.jpg +9546.jpg +12916.jpg +21985.jpg +12548.jpg +17984.jpg +5311.jpg +20986.jpg +11879.jpg +14427.jpg +2337.jpg +5631.jpg +25737.jpg +23576.jpg +4459.jpg +3844.jpg +13439.jpg +9898.jpg +18907.jpg +7965.jpg +3569.jpg +25407.jpg +13951.jpg +15312.jpg +29146.jpg +5654.jpg +17774.jpg +9031.jpg +3434.jpg +4780.jpg +17281.jpg +4012.jpg +27280.jpg +5883.jpg +6623.jpg +10816.jpg +12746.jpg +5665.jpg +29925.jpg +19949.jpg +5101.jpg +18942.jpg +26812.jpg +19908.jpg +27679.jpg +15461.jpg +18537.jpg +16103.jpg +9590.jpg +21086.jpg +20091.jpg +15464.jpg +12348.jpg +4672.jpg +6384.jpg +1758.jpg +896.jpg +21673.jpg +10729.jpg +14071.jpg +29354.jpg +20578.jpg +6745.jpg +3074.jpg +29716.jpg +6984.jpg +13444.jpg +13615.jpg +29951.jpg +1866.jpg +15643.jpg +9829.jpg +10362.jpg +23074.jpg +1634.jpg +29088.jpg +19271.jpg +14629.jpg +28585.jpg +18610.jpg +10292.jpg +29283.jpg +1311.jpg +7661.jpg +29186.jpg +908.jpg +16594.jpg +20246.jpg +15980.jpg +15382.jpg +13953.jpg +25640.jpg +24311.jpg +12553.jpg +26763.jpg +28987.jpg +23674.jpg +4849.jpg +12263.jpg +127.jpg +20995.jpg +7236.jpg +1833.jpg +3226.jpg +24478.jpg +17846.jpg +10254.jpg +8199.jpg +18627.jpg +23304.jpg +6477.jpg +14984.jpg +21105.jpg +12283.jpg +22804.jpg +20307.jpg +8905.jpg +21427.jpg +3571.jpg +12834.jpg +15612.jpg +3678.jpg +10722.jpg +23447.jpg +669.jpg +21956.jpg +25657.jpg +13591.jpg +20261.jpg +29288.jpg +17325.jpg +24276.jpg +21674.jpg +16214.jpg +6870.jpg +29745.jpg +12851.jpg +12966.jpg +5554.jpg +21514.jpg +11290.jpg +17750.jpg +25685.jpg +12661.jpg +1501.jpg +20107.jpg +4013.jpg +29963.jpg +18589.jpg +10673.jpg +6559.jpg +158.jpg +2541.jpg +21719.jpg +13106.jpg +22071.jpg +21085.jpg +16745.jpg +29712.jpg +10698.jpg +16403.jpg +10392.jpg +22067.jpg +8758.jpg +21410.jpg +18993.jpg +10010.jpg +2598.jpg +1303.jpg +20747.jpg +12340.jpg +1291.jpg +17423.jpg +16260.jpg +7766.jpg +2503.jpg +10972.jpg +23705.jpg +9434.jpg +27981.jpg +6486.jpg +3965.jpg +4523.jpg +24017.jpg +20773.jpg +18597.jpg +26767.jpg +14860.jpg +23521.jpg +5127.jpg +2476.jpg +27012.jpg +29086.jpg +16427.jpg +19434.jpg +7756.jpg +7460.jpg +2455.jpg +8636.jpg +8734.jpg +9529.jpg +1466.jpg +20691.jpg +20450.jpg +8547.jpg +27696.jpg +10491.jpg +10074.jpg +2567.jpg +4486.jpg +3655.jpg +3893.jpg +13753.jpg +4095.jpg +28777.jpg +304.jpg +26644.jpg +7091.jpg +4791.jpg +3487.jpg +2768.jpg +14115.jpg +9233.jpg +18248.jpg +6974.jpg +22228.jpg +14252.jpg +24812.jpg +18499.jpg +16429.jpg +28474.jpg +9975.jpg +2195.jpg +21408.jpg +14606.jpg +17477.jpg +28916.jpg +15108.jpg +4790.jpg +1478.jpg +2759.jpg +14936.jpg +29868.jpg +26216.jpg +7902.jpg +4794.jpg +7180.jpg +4510.jpg +211.jpg +11022.jpg +1813.jpg +21506.jpg +11214.jpg +25399.jpg +5907.jpg +29645.jpg +16014.jpg +25415.jpg +29343.jpg +2234.jpg +8898.jpg +23657.jpg +24968.jpg +21848.jpg +16899.jpg +10738.jpg +29467.jpg +20755.jpg 
+11433.jpg +17011.jpg +23220.jpg +9264.jpg +10433.jpg +15399.jpg +16977.jpg +29903.jpg +23830.jpg +11084.jpg +10032.jpg +16300.jpg +27962.jpg +2692.jpg +28923.jpg +24341.jpg +29055.jpg +21942.jpg +21433.jpg +27588.jpg +11766.jpg +1280.jpg +24982.jpg +21943.jpg +13240.jpg +14795.jpg +26012.jpg +19738.jpg +13645.jpg +826.jpg +12662.jpg +15128.jpg +14782.jpg +2532.jpg +5881.jpg +24863.jpg +2607.jpg +28630.jpg +24235.jpg +1561.jpg +6537.jpg +11711.jpg +18173.jpg +18781.jpg +27641.jpg +17409.jpg +4771.jpg +24855.jpg +29724.jpg +11591.jpg +24995.jpg +13405.jpg +2744.jpg +29540.jpg +9891.jpg +7345.jpg +27207.jpg +1896.jpg +7770.jpg +9090.jpg +28614.jpg +7004.jpg +3358.jpg +25466.jpg +23805.jpg +10089.jpg +1873.jpg +4646.jpg +19408.jpg +1052.jpg +3194.jpg +13167.jpg +3010.jpg +5897.jpg +29561.jpg +17323.jpg +28677.jpg +26368.jpg +16166.jpg +5507.jpg +8053.jpg +14549.jpg +21738.jpg +7448.jpg +9324.jpg +24246.jpg +19022.jpg +17503.jpg +7792.jpg +15207.jpg +20471.jpg +7986.jpg +20767.jpg +3712.jpg +19142.jpg +10995.jpg +15039.jpg +14481.jpg +5660.jpg +16398.jpg +21670.jpg +15830.jpg +4404.jpg +14093.jpg +26243.jpg +24038.jpg +4137.jpg +395.jpg +19264.jpg +7647.jpg +21193.jpg +15672.jpg +21147.jpg +10113.jpg +22128.jpg +25004.jpg +20710.jpg +18037.jpg +26191.jpg +25365.jpg +1490.jpg +6667.jpg +12287.jpg +10543.jpg +2064.jpg +8969.jpg +21546.jpg +9317.jpg +18793.jpg +1872.jpg +18240.jpg +15267.jpg +7163.jpg +22367.jpg +3857.jpg +24965.jpg +24804.jpg +13325.jpg +16642.jpg +25472.jpg +12619.jpg +7559.jpg +24294.jpg +4948.jpg +22377.jpg +24331.jpg +18617.jpg +27002.jpg +2877.jpg +5698.jpg +19999.jpg +13583.jpg +7280.jpg +1897.jpg +22047.jpg +10561.jpg +20682.jpg +26703.jpg +29999.jpg +26171.jpg +15187.jpg +10436.jpg +20768.jpg +667.jpg +11204.jpg +737.jpg +16000.jpg +3594.jpg +23032.jpg +21151.jpg +5761.jpg +14666.jpg +5851.jpg +4380.jpg +12470.jpg +24243.jpg +24802.jpg +14005.jpg +6616.jpg +13644.jpg +13661.jpg +460.jpg +1036.jpg +10098.jpg +17556.jpg +24637.jpg +838.jpg +24921.jpg +8899.jpg +2257.jpg +16930.jpg +12815.jpg +11093.jpg +5832.jpg +10615.jpg +21635.jpg +16818.jpg +23128.jpg +23887.jpg +29138.jpg +27333.jpg +10119.jpg +13104.jpg +26512.jpg +27688.jpg +25850.jpg +14278.jpg +18986.jpg +11454.jpg +3755.jpg +16629.jpg +28694.jpg +18223.jpg +20367.jpg +6153.jpg +6135.jpg +26330.jpg +22054.jpg +12674.jpg +4442.jpg +8094.jpg +5122.jpg +29852.jpg +27132.jpg +16040.jpg +8042.jpg +5052.jpg +4092.jpg +14530.jpg +11826.jpg +27238.jpg +17923.jpg +27097.jpg +26735.jpg +14417.jpg +25948.jpg +28935.jpg +17483.jpg +22063.jpg +18822.jpg +10216.jpg +29575.jpg +14719.jpg +20816.jpg +25109.jpg +14246.jpg +27422.jpg +6200.jpg +29784.jpg +5702.jpg +12595.jpg +7549.jpg +16750.jpg +19410.jpg +2973.jpg +23269.jpg +14881.jpg +90.jpg +22296.jpg +10865.jpg +9781.jpg +11081.jpg +4597.jpg +20284.jpg +8104.jpg +27982.jpg +833.jpg +17902.jpg +11310.jpg +13998.jpg +10753.jpg +22554.jpg +23216.jpg +20950.jpg +19303.jpg +4280.jpg +21582.jpg +19954.jpg +15597.jpg +12200.jpg +11811.jpg +1497.jpg +16109.jpg +19722.jpg +8996.jpg +25494.jpg +17493.jpg +10968.jpg +18752.jpg +25198.jpg +5248.jpg +11991.jpg +21477.jpg +23892.jpg +15361.jpg +5444.jpg +10318.jpg +7781.jpg +21327.jpg +16622.jpg +28511.jpg +6374.jpg +20260.jpg +695.jpg +10243.jpg +11324.jpg +10206.jpg +20099.jpg +7406.jpg +24150.jpg +8509.jpg +20577.jpg +20637.jpg +25484.jpg +9289.jpg +28648.jpg +3967.jpg +14844.jpg +26542.jpg +391.jpg +6464.jpg +13013.jpg +29136.jpg +29118.jpg +24826.jpg +13356.jpg +18950.jpg +22644.jpg +8467.jpg +9044.jpg +7843.jpg +17328.jpg +6372.jpg 
+2932.jpg +15167.jpg +14405.jpg +21489.jpg +21120.jpg +25350.jpg +29956.jpg +13196.jpg +5519.jpg +11944.jpg +26201.jpg +6845.jpg +3640.jpg +7436.jpg +252.jpg +5548.jpg +19040.jpg +16337.jpg +14126.jpg +9453.jpg +1344.jpg +12835.jpg +19186.jpg +8382.jpg +24487.jpg +3411.jpg +19661.jpg +28958.jpg +17425.jpg +18602.jpg +8486.jpg +16583.jpg +19423.jpg +18083.jpg +18293.jpg +2011.jpg +28434.jpg +3671.jpg +10735.jpg +22473.jpg +13149.jpg +15655.jpg +14834.jpg +11288.jpg +29880.jpg +12591.jpg +20883.jpg +26867.jpg +14343.jpg +10529.jpg +13244.jpg +19162.jpg +8038.jpg +8017.jpg +8070.jpg +28072.jpg +1108.jpg +29134.jpg +16391.jpg +21048.jpg +29071.jpg +3357.jpg +29988.jpg +15955.jpg +21328.jpg +876.jpg +24030.jpg +22505.jpg +21024.jpg +11723.jpg +26900.jpg +795.jpg +2283.jpg +25855.jpg +8248.jpg +17368.jpg +19533.jpg +24094.jpg +24848.jpg +4706.jpg +6067.jpg +14899.jpg +29472.jpg +8978.jpg +7550.jpg +4515.jpg +24857.jpg +3535.jpg +17621.jpg +12922.jpg +22005.jpg +10159.jpg +7290.jpg +7038.jpg +4215.jpg +4796.jpg +19456.jpg +23953.jpg +22895.jpg +4789.jpg +9309.jpg +23390.jpg +19851.jpg +10171.jpg +714.jpg +9538.jpg +2593.jpg +9275.jpg +15984.jpg +22634.jpg +23788.jpg +9402.jpg +25772.jpg +26905.jpg +5207.jpg +3741.jpg +10178.jpg +3762.jpg +28168.jpg +1050.jpg +17841.jpg +29021.jpg +20356.jpg +26032.jpg +20377.jpg +5211.jpg +5664.jpg +8020.jpg +1513.jpg +6979.jpg +26231.jpg +20770.jpg +28297.jpg +9708.jpg +18832.jpg +28091.jpg +5310.jpg +23071.jpg +10546.jpg +3151.jpg +3834.jpg +28976.jpg +23793.jpg +4664.jpg +4607.jpg +10792.jpg +12425.jpg +9813.jpg +22976.jpg +28749.jpg +403.jpg +6831.jpg +17656.jpg +2143.jpg +23207.jpg +7415.jpg +10820.jpg +12439.jpg +4522.jpg +12247.jpg +19538.jpg +21708.jpg +13678.jpg +10456.jpg +15350.jpg +25559.jpg +1565.jpg +15670.jpg +3378.jpg +27159.jpg +14144.jpg +18525.jpg +16133.jpg +15957.jpg +27507.jpg +16063.jpg +16003.jpg +15932.jpg +15020.jpg +25378.jpg +15811.jpg +5729.jpg +13978.jpg +4105.jpg +19382.jpg +25176.jpg +12781.jpg +15768.jpg +24931.jpg +11676.jpg +4769.jpg +2286.jpg +12670.jpg +24459.jpg +29704.jpg +28189.jpg +18651.jpg +13664.jpg +27874.jpg +23532.jpg +15254.jpg +22684.jpg +8578.jpg +28857.jpg +16304.jpg +12978.jpg +8214.jpg +24228.jpg +18522.jpg +26514.jpg +9042.jpg +14693.jpg +24481.jpg +1975.jpg +28266.jpg +1818.jpg +27402.jpg +29570.jpg +23418.jpg +22281.jpg +13054.jpg +28428.jpg +5649.jpg +1126.jpg +26061.jpg +23747.jpg +18218.jpg +29485.jpg +22706.jpg +28179.jpg +28765.jpg +24393.jpg +25173.jpg +21291.jpg +5977.jpg +8641.jpg +8914.jpg +953.jpg +21528.jpg +15718.jpg +20676.jpg +17382.jpg +15884.jpg +6065.jpg +7115.jpg +10204.jpg +12532.jpg +15061.jpg +18548.jpg +29565.jpg +14452.jpg +29301.jpg +21578.jpg +1656.jpg +3449.jpg +2791.jpg +25349.jpg +14591.jpg +2942.jpg +25759.jpg +27115.jpg +29421.jpg +8716.jpg +26954.jpg +1074.jpg +140.jpg +782.jpg +292.jpg +4504.jpg +1949.jpg +29629.jpg +23614.jpg +12519.jpg +3854.jpg +78.jpg +24455.jpg +9990.jpg +1170.jpg +16236.jpg +7181.jpg +25734.jpg +12273.jpg +6508.jpg +17692.jpg +9799.jpg +3272.jpg +25063.jpg +15225.jpg +28386.jpg +8051.jpg +9215.jpg +17925.jpg +16856.jpg +20818.jpg +3515.jpg +8274.jpg +284.jpg +27611.jpg +28905.jpg +23336.jpg +8740.jpg +26392.jpg +2688.jpg +22293.jpg +9750.jpg +21718.jpg +7747.jpg +5218.jpg +17935.jpg +6199.jpg +23668.jpg +27617.jpg +2279.jpg +13861.jpg +50.jpg +24138.jpg +9820.jpg +8352.jpg +12179.jpg +21014.jpg +13934.jpg +7452.jpg +25096.jpg +10259.jpg +8872.jpg +9924.jpg +20907.jpg +23874.jpg +7296.jpg +28309.jpg +5403.jpg +29514.jpg +12416.jpg +3739.jpg +27699.jpg 
+22191.jpg +16312.jpg +27820.jpg +11405.jpg +22379.jpg +21499.jpg +27452.jpg +1647.jpg +28839.jpg +18688.jpg +19189.jpg +119.jpg +8505.jpg +23046.jpg +25703.jpg +2058.jpg +6982.jpg +19901.jpg +23139.jpg +14340.jpg +22801.jpg +14814.jpg +16884.jpg +26979.jpg +6614.jpg +13008.jpg +13723.jpg +27482.jpg +26539.jpg +13034.jpg +15721.jpg +25957.jpg +1811.jpg +24089.jpg +6900.jpg +8845.jpg +20318.jpg +9622.jpg +27587.jpg +27720.jpg +10830.jpg +28134.jpg +1223.jpg +28160.jpg +29707.jpg +9007.jpg +25124.jpg +27502.jpg +18579.jpg +2396.jpg +9826.jpg +25916.jpg +28554.jpg +21948.jpg +17026.jpg +7704.jpg +6267.jpg +25298.jpg +12269.jpg +9129.jpg +23858.jpg +21142.jpg +484.jpg +5981.jpg +19767.jpg +7710.jpg +2172.jpg +11331.jpg +22259.jpg +28221.jpg +27339.jpg +12456.jpg +21395.jpg +1754.jpg +11191.jpg +5442.jpg +25328.jpg +16234.jpg +24727.jpg +10398.jpg +5481.jpg +4038.jpg +11771.jpg +810.jpg +9630.jpg +6405.jpg +8207.jpg +5750.jpg +7072.jpg +2855.jpg +18301.jpg +29912.jpg +17457.jpg +9325.jpg +4891.jpg +15068.jpg +5292.jpg +13284.jpg +13496.jpg +20106.jpg +11406.jpg +28013.jpg +3380.jpg +5388.jpg +22110.jpg +8295.jpg +24329.jpg +673.jpg +27697.jpg +6070.jpg +28001.jpg +23338.jpg +1757.jpg +28354.jpg +12459.jpg +6733.jpg +1838.jpg +22035.jpg +24186.jpg +22807.jpg +26845.jpg +17119.jpg +12598.jpg +2694.jpg +5095.jpg +3407.jpg +23664.jpg +24927.jpg +12219.jpg +7503.jpg +5026.jpg +16275.jpg +8659.jpg +19242.jpg +1966.jpg +16448.jpg +1003.jpg +21421.jpg +25465.jpg +3428.jpg +28353.jpg +9709.jpg +19209.jpg +24887.jpg +13022.jpg +26933.jpg +7953.jpg +14098.jpg +29917.jpg +29285.jpg +19359.jpg +3939.jpg +7801.jpg +10720.jpg +26016.jpg +9954.jpg +24721.jpg +15366.jpg +7305.jpg +28057.jpg +7.jpg +25185.jpg +1940.jpg +24432.jpg +19760.jpg +29630.jpg +21986.jpg +25308.jpg +20012.jpg +9848.jpg +16611.jpg +16914.jpg +18749.jpg +27480.jpg +16477.jpg +8627.jpg +12121.jpg +17023.jpg +18810.jpg +10506.jpg +3179.jpg +25743.jpg +16851.jpg +19932.jpg +27075.jpg +11646.jpg +23681.jpg +24388.jpg +19960.jpg +20234.jpg +28148.jpg +28606.jpg +7565.jpg +28636.jpg +20150.jpg +9354.jpg +2778.jpg +5387.jpg +7424.jpg +10078.jpg +14372.jpg +13208.jpg +25700.jpg +22152.jpg +21646.jpg +6319.jpg +7709.jpg +16299.jpg +1236.jpg +1079.jpg +26421.jpg +16269.jpg +23022.jpg +16048.jpg +14601.jpg +29074.jpg +4383.jpg +29039.jpg +3730.jpg +13173.jpg +7147.jpg +6173.jpg +26148.jpg +7043.jpg +13701.jpg +9099.jpg +7484.jpg +19367.jpg +6033.jpg +3545.jpg +3493.jpg +9331.jpg +18843.jpg +1713.jpg +26675.jpg +18866.jpg +9516.jpg +17370.jpg +19645.jpg +7037.jpg +24047.jpg +21969.jpg +9518.jpg +8495.jpg +14529.jpg +24864.jpg +14631.jpg +16553.jpg +15882.jpg +14476.jpg +10559.jpg +23460.jpg +5701.jpg +6992.jpg +23407.jpg +29035.jpg +7669.jpg +4931.jpg +17927.jpg +6697.jpg +23196.jpg +20104.jpg +17178.jpg +24470.jpg +25230.jpg +11834.jpg +1644.jpg +10581.jpg +24226.jpg +18897.jpg +10935.jpg +11803.jpg +5841.jpg +1860.jpg +11812.jpg +23446.jpg +26387.jpg +6884.jpg +18815.jpg +1982.jpg +6837.jpg +852.jpg +27195.jpg +12407.jpg +26657.jpg +26134.jpg +19007.jpg +244.jpg +20151.jpg +28626.jpg +4710.jpg +21182.jpg +25485.jpg +27832.jpg +2243.jpg +28260.jpg +24223.jpg +291.jpg +8380.jpg +18760.jpg +10851.jpg +10953.jpg +16759.jpg +13133.jpg +366.jpg +21511.jpg +15199.jpg +19530.jpg +13372.jpg +7040.jpg +29017.jpg +18468.jpg +5267.jpg +7946.jpg +12590.jpg +12897.jpg +10244.jpg +4765.jpg +24136.jpg +28594.jpg +15001.jpg +24061.jpg +16635.jpg +18603.jpg +8096.jpg +24147.jpg +2765.jpg +9138.jpg +23086.jpg +17625.jpg +7835.jpg +4698.jpg +23260.jpg +8300.jpg 
+22670.jpg +21619.jpg +28731.jpg +4078.jpg +12195.jpg +689.jpg +3452.jpg +4229.jpg +8286.jpg +29030.jpg +7990.jpg +28236.jpg +4848.jpg +26262.jpg +7820.jpg +21488.jpg +13918.jpg +4897.jpg +6280.jpg +5449.jpg +7306.jpg +15604.jpg +17990.jpg +26081.jpg +13255.jpg +22520.jpg +29694.jpg +3643.jpg +16770.jpg +13458.jpg +29688.jpg +13816.jpg +24665.jpg +1293.jpg +6086.jpg +23555.jpg +7293.jpg +29293.jpg +15845.jpg +22163.jpg +17013.jpg +18170.jpg +17864.jpg +27023.jpg +28379.jpg +10825.jpg +12909.jpg +28436.jpg +28653.jpg +5134.jpg +25126.jpg +23473.jpg +19891.jpg +29744.jpg +24229.jpg +27209.jpg +2560.jpg +7579.jpg +5667.jpg +223.jpg +6693.jpg +7049.jpg +28774.jpg +18540.jpg +18099.jpg +1393.jpg +9323.jpg +11215.jpg +2015.jpg +25764.jpg +16821.jpg +23682.jpg +8603.jpg +26395.jpg +25583.jpg +8233.jpg +7601.jpg +9559.jpg +16614.jpg +22309.jpg +3110.jpg +6190.jpg +24051.jpg +6789.jpg +10714.jpg +21767.jpg +977.jpg +21643.jpg +16946.jpg +7185.jpg +9562.jpg +8936.jpg +5325.jpg +17366.jpg +19052.jpg +23591.jpg +18006.jpg +28628.jpg +1802.jpg +18192.jpg +4844.jpg +27279.jpg +14118.jpg +27121.jpg +10139.jpg +10160.jpg +17360.jpg +13087.jpg +28417.jpg +1968.jpg +18561.jpg +21379.jpg +339.jpg +29499.jpg +3341.jpg +18153.jpg +22550.jpg +17600.jpg +26727.jpg +20058.jpg +16436.jpg +12098.jpg +15989.jpg +10944.jpg +5255.jpg +13681.jpg +24207.jpg +24799.jpg +28620.jpg +28132.jpg +3713.jpg +17455.jpg +22701.jpg +23327.jpg +12960.jpg +2481.jpg +3327.jpg +8956.jpg +327.jpg +11392.jpg +16486.jpg +10144.jpg +26463.jpg +4867.jpg +7192.jpg +4692.jpg +20081.jpg +12101.jpg +29067.jpg +1548.jpg +23649.jpg +993.jpg +19727.jpg +5225.jpg +28119.jpg +11258.jpg +24845.jpg +1670.jpg +4017.jpg +18884.jpg +10057.jpg +22763.jpg +6895.jpg +16379.jpg +20379.jpg +6566.jpg +20552.jpg +28703.jpg +3440.jpg +9894.jpg +22312.jpg +13568.jpg +26548.jpg +2344.jpg +4342.jpg +29389.jpg +1496.jpg +6666.jpg +4823.jpg +25481.jpg +862.jpg +13711.jpg +23383.jpg +17886.jpg +21446.jpg +21774.jpg +5324.jpg +7230.jpg +18859.jpg +1691.jpg +16446.jpg +13973.jpg +24071.jpg +11518.jpg +14194.jpg +20226.jpg +9143.jpg +18780.jpg +2878.jpg +12963.jpg +1914.jpg +9231.jpg +26014.jpg +18200.jpg +10533.jpg +9785.jpg +25653.jpg +13422.jpg +24808.jpg +24306.jpg +1908.jpg +22189.jpg +4944.jpg +18227.jpg +13997.jpg +10606.jpg +14078.jpg +11054.jpg +26605.jpg +20501.jpg +1148.jpg +17727.jpg +21207.jpg +14315.jpg +3756.jpg +26154.jpg +626.jpg +7769.jpg +16136.jpg +22541.jpg +29315.jpg +14771.jpg +21262.jpg +11323.jpg +22955.jpg +20726.jpg +10734.jpg +29837.jpg +16663.jpg +4591.jpg +20033.jpg +13974.jpg +12113.jpg +3824.jpg +29834.jpg +14522.jpg +18129.jpg +14208.jpg +7408.jpg +16693.jpg +26811.jpg +23202.jpg +7289.jpg +9782.jpg +5004.jpg +14049.jpg +6113.jpg +20698.jpg +13604.jpg +19540.jpg +2063.jpg +4809.jpg +22606.jpg +25798.jpg +16911.jpg +5683.jpg +17862.jpg +28021.jpg +27352.jpg +17626.jpg +8444.jpg +25986.jpg +25046.jpg +12768.jpg +24449.jpg +15949.jpg +15259.jpg +27707.jpg +20562.jpg +7346.jpg +18174.jpg +12747.jpg +22927.jpg +10045.jpg +12716.jpg +21448.jpg +13836.jpg +16849.jpg +7060.jpg +15926.jpg +29457.jpg +10949.jpg +6687.jpg +8751.jpg +17977.jpg +6772.jpg +6031.jpg +1879.jpg +29332.jpg +503.jpg +29676.jpg +11417.jpg +6671.jpg +15049.jpg +3085.jpg +10100.jpg +1752.jpg +7403.jpg +14034.jpg +5068.jpg +20030.jpg +22428.jpg +21901.jpg +14007.jpg +28475.jpg +18332.jpg +21883.jpg +7573.jpg +10462.jpg +25052.jpg +12307.jpg +21107.jpg +9315.jpg +17309.jpg +16050.jpg +19596.jpg +17324.jpg +5767.jpg +14298.jpg +1595.jpg +17184.jpg +13702.jpg +10974.jpg +7582.jpg 
+28552.jpg +27540.jpg +17373.jpg +11586.jpg +26647.jpg +8489.jpg +13670.jpg +20984.jpg +11422.jpg +27971.jpg +1064.jpg +22923.jpg +6075.jpg +28866.jpg +1157.jpg +9745.jpg +17972.jpg +20250.jpg +16624.jpg +10263.jpg +28979.jpg +16867.jpg +19252.jpg +12658.jpg +16564.jpg +24641.jpg +9876.jpg +14479.jpg +2354.jpg +14879.jpg +15853.jpg +23266.jpg +6098.jpg +24068.jpg +3384.jpg +7650.jpg +19154.jpg +17607.jpg +28271.jpg +12905.jpg +27515.jpg +22755.jpg +3269.jpg +8241.jpg +9425.jpg +15139.jpg +24618.jpg +14094.jpg +9431.jpg +19057.jpg +9702.jpg +21981.jpg +25154.jpg +857.jpg +26269.jpg +28465.jpg +5633.jpg +21337.jpg +6707.jpg +5022.jpg +5027.jpg +28046.jpg +5487.jpg +23984.jpg +11051.jpg +22394.jpg +12352.jpg +1015.jpg +21339.jpg +25519.jpg +17538.jpg +26317.jpg +6328.jpg +5151.jpg +18188.jpg +27037.jpg +29691.jpg +13265.jpg +2633.jpg +723.jpg +27987.jpg +23762.jpg +16320.jpg +1759.jpg +11270.jpg +29805.jpg +21575.jpg +12178.jpg +26894.jpg +3048.jpg +19469.jpg +11874.jpg +11563.jpg +20678.jpg +15386.jpg +18998.jpg +23517.jpg +15331.jpg +28522.jpg +21936.jpg +29828.jpg +13033.jpg +25873.jpg +18071.jpg +18171.jpg +5779.jpg +245.jpg +14872.jpg +26420.jpg +5801.jpg +8988.jpg +18829.jpg +25413.jpg +18347.jpg +19194.jpg +27788.jpg +1194.jpg +20785.jpg +19363.jpg +28829.jpg +20784.jpg +26699.jpg +21939.jpg +6901.jpg +7463.jpg +21764.jpg +21535.jpg +4493.jpg +21092.jpg +11870.jpg +5270.jpg +1560.jpg +11396.jpg +10801.jpg +11701.jpg +14074.jpg +25271.jpg +16549.jpg +21829.jpg +19719.jpg +19354.jpg +28634.jpg +19421.jpg +29062.jpg +25901.jpg +8646.jpg +13141.jpg +20566.jpg +27750.jpg +7923.jpg +3062.jpg +24.jpg +26998.jpg +24269.jpg +629.jpg +14540.jpg +7513.jpg +2708.jpg +28360.jpg +11989.jpg +9094.jpg +19569.jpg +23489.jpg +14939.jpg +7698.jpg +27362.jpg +28375.jpg +7119.jpg +17331.jpg +21205.jpg +1825.jpg +18827.jpg +2953.jpg +21532.jpg +24576.jpg +12790.jpg +5680.jpg +21962.jpg +22820.jpg +22521.jpg +17831.jpg +5112.jpg +3477.jpg +17263.jpg +26355.jpg +24076.jpg +11640.jpg +25642.jpg +14489.jpg +10575.jpg +7164.jpg +983.jpg +15659.jpg +12092.jpg +15180.jpg +1447.jpg +4492.jpg +1954.jpg +15021.jpg +20400.jpg +20282.jpg +7342.jpg +19812.jpg +24850.jpg +4490.jpg +15433.jpg +13332.jpg +13199.jpg +12665.jpg +21746.jpg +9912.jpg +26669.jpg +27317.jpg +5396.jpg +5773.jpg +28545.jpg +27806.jpg +3869.jpg +29849.jpg +22122.jpg +11430.jpg +5019.jpg +8194.jpg +7870.jpg +2101.jpg +24605.jpg +532.jpg +9001.jpg +28744.jpg +14701.jpg +22055.jpg +27162.jpg +12035.jpg +20934.jpg +28652.jpg +25553.jpg +24953.jpg +7080.jpg +19364.jpg +13517.jpg +15175.jpg +24809.jpg +13526.jpg +74.jpg +26460.jpg +6660.jpg +113.jpg +2219.jpg +19502.jpg +13908.jpg +2832.jpg +13158.jpg +23516.jpg +4361.jpg +7028.jpg +6659.jpg +23907.jpg +16311.jpg +16848.jpg +9412.jpg +9399.jpg +9655.jpg +4766.jpg +3937.jpg +8703.jpg +25771.jpg +5261.jpg +16708.jpg +23267.jpg +10841.jpg +3572.jpg +13130.jpg +25091.jpg +25711.jpg +20363.jpg +10415.jpg +29460.jpg +10183.jpg +7665.jpg +12578.jpg +17206.jpg +18101.jpg +28535.jpg +1535.jpg +9632.jpg +29539.jpg +5495.jpg +23345.jpg +1635.jpg +18401.jpg +572.jpg +14091.jpg +18833.jpg +20319.jpg +24485.jpg +28639.jpg +28373.jpg +663.jpg +18888.jpg +14682.jpg +4450.jpg +29444.jpg +188.jpg +8924.jpg +22977.jpg +13524.jpg +26517.jpg +4901.jpg +27360.jpg +5220.jpg +18806.jpg +25227.jpg +24382.jpg +8783.jpg +22703.jpg +20192.jpg +1370.jpg +21280.jpg +16084.jpg +19355.jpg +4190.jpg +11665.jpg +2747.jpg +17471.jpg +15310.jpg +10279.jpg +5963.jpg +25005.jpg +12614.jpg +8885.jpg +19294.jpg +6078.jpg +16846.jpg +17781.jpg 
+24245.jpg +8014.jpg +15328.jpg +10633.jpg +21889.jpg +882.jpg +4677.jpg +28442.jpg +8062.jpg +21776.jpg +21808.jpg +26263.jpg +4929.jpg +22150.jpg +28020.jpg +28717.jpg +11872.jpg +20847.jpg +27383.jpg +24239.jpg +28012.jpg +3910.jpg +18028.jpg +3689.jpg +1265.jpg +8125.jpg +26028.jpg +11479.jpg +10874.jpg +25886.jpg +24457.jpg +12387.jpg +22046.jpg +23421.jpg +27462.jpg +17965.jpg +20109.jpg +25062.jpg +19770.jpg +15488.jpg +6224.jpg +6563.jpg +14099.jpg +17584.jpg +19024.jpg +9213.jpg +18438.jpg +885.jpg +29814.jpg +1666.jpg +19697.jpg +12249.jpg +10668.jpg +7779.jpg +21726.jpg +19483.jpg +12148.jpg +11621.jpg +5364.jpg +4028.jpg +20743.jpg +25709.jpg +11861.jpg +9852.jpg +1279.jpg +22666.jpg +7973.jpg +22185.jpg +18093.jpg +942.jpg +6234.jpg +19121.jpg +16647.jpg +4788.jpg +6803.jpg +22662.jpg +25575.jpg +25266.jpg +11098.jpg +29068.jpg +9133.jpg +29415.jpg +24205.jpg +21569.jpg +9600.jpg +10161.jpg +21536.jpg +8453.jpg +27728.jpg +24693.jpg +3918.jpg +19405.jpg +16155.jpg +3720.jpg +11314.jpg +15317.jpg +5826.jpg +5118.jpg +27319.jpg +2125.jpg +5602.jpg +7353.jpg +1765.jpg +18322.jpg +4642.jpg +28791.jpg +25528.jpg +13545.jpg +3783.jpg +1725.jpg +2022.jpg +11079.jpg +28959.jpg +13802.jpg +10930.jpg +13550.jpg +11486.jpg +9244.jpg +24668.jpg +16149.jpg +1130.jpg +3785.jpg +1596.jpg +15546.jpg +3101.jpg +20972.jpg +28558.jpg +25078.jpg +18025.jpg +28036.jpg +2756.jpg +6936.jpg +10501.jpg +16514.jpg +29368.jpg +19342.jpg +16362.jpg +27918.jpg +3952.jpg +18352.jpg +10321.jpg +2639.jpg +21592.jpg +7220.jpg +14500.jpg +5219.jpg +1769.jpg +1590.jpg +16253.jpg +18798.jpg +22918.jpg +15632.jpg +23090.jpg +22383.jpg +29253.jpg +25928.jpg +14227.jpg +13722.jpg +15418.jpg +3963.jpg +13384.jpg +21754.jpg +19658.jpg +5133.jpg +21426.jpg +20417.jpg +3480.jpg +11692.jpg +27795.jpg +3430.jpg +28794.jpg +12573.jpg +17294.jpg +8793.jpg +27810.jpg +18919.jpg +15647.jpg +8245.jpg +12235.jpg +13774.jpg +16557.jpg +28494.jpg +20158.jpg +18755.jpg +13751.jpg +5351.jpg +3505.jpg +15857.jpg +22484.jpg +28610.jpg +2780.jpg +4630.jpg +22780.jpg +10484.jpg +5051.jpg +5707.jpg +11915.jpg +27348.jpg +19203.jpg +22717.jpg +23285.jpg +28887.jpg +6354.jpg +23718.jpg +22267.jpg +25555.jpg +15081.jpg +17752.jpg +28157.jpg +11686.jpg +13365.jpg +5755.jpg +14607.jpg +15213.jpg +24446.jpg +13915.jpg +22593.jpg +12344.jpg +13542.jpg +24702.jpg +29820.jpg +29310.jpg +14916.jpg +26617.jpg +22850.jpg +29054.jpg +11608.jpg +2378.jpg +15754.jpg +29209.jpg +3037.jpg +29714.jpg +8369.jpg +29850.jpg +11857.jpg +22947.jpg +14191.jpg +22003.jpg +2612.jpg +25267.jpg +7165.jpg +5165.jpg +29252.jpg +24377.jpg +4389.jpg +13142.jpg +16789.jpg +28507.jpg +1711.jpg +6603.jpg +19959.jpg +15286.jpg +1205.jpg +19494.jpg +9870.jpg +23037.jpg +8582.jpg +23684.jpg +12411.jpg +4743.jpg +16319.jpg +1755.jpg +7129.jpg +7242.jpg +13258.jpg +25675.jpg +1566.jpg +15890.jpg +20892.jpg +25809.jpg +12620.jpg +25100.jpg +11108.jpg +775.jpg +625.jpg +308.jpg +4567.jpg +1109.jpg +6763.jpg +13981.jpg +9605.jpg +22984.jpg +82.jpg +10819.jpg +9855.jpg +5760.jpg +23837.jpg +13651.jpg +3396.jpg +10548.jpg +27814.jpg +719.jpg +2636.jpg +490.jpg +14907.jpg +15323.jpg +17652.jpg +14742.jpg +22385.jpg +6919.jpg +20776.jpg +5347.jpg +10033.jpg +4961.jpg +7652.jpg +22728.jpg +22085.jpg +25384.jpg +28208.jpg +25193.jpg +16889.jpg +4094.jpg +4167.jpg +28966.jpg +25606.jpg +27695.jpg +26143.jpg +17603.jpg +8221.jpg +11659.jpg +8709.jpg +12818.jpg +19480.jpg +29239.jpg +22275.jpg +5524.jpg +21863.jpg +23656.jpg +15344.jpg +8804.jpg +11025.jpg +21824.jpg +19373.jpg 
+27533.jpg +6195.jpg +11992.jpg +19280.jpg +11809.jpg +17015.jpg +3645.jpg +2653.jpg +20137.jpg +27826.jpg +19129.jpg +6387.jpg +16717.jpg +16010.jpg +6278.jpg +15631.jpg +14734.jpg +20457.jpg +10725.jpg +22822.jpg +15964.jpg +18647.jpg +2706.jpg +26020.jpg +17785.jpg +24735.jpg +8957.jpg +7642.jpg +22491.jpg +14592.jpg +25253.jpg +12870.jpg +21954.jpg +26896.jpg +10984.jpg +11969.jpg +5117.jpg +12750.jpg +15532.jpg +27909.jpg +1111.jpg +19797.jpg +2318.jpg +6652.jpg +9238.jpg +17082.jpg +106.jpg +24810.jpg +16399.jpg +1604.jpg +18337.jpg +17449.jpg +26877.jpg +1946.jpg +3436.jpg +11131.jpg +9760.jpg +5459.jpg +5926.jpg +20143.jpg +26940.jpg +28087.jpg +29904.jpg +21817.jpg +27197.jpg +28651.jpg +17918.jpg +10915.jpg +9908.jpg +21403.jpg +10444.jpg +15781.jpg +25932.jpg +22619.jpg +14012.jpg +13553.jpg +16528.jpg +16602.jpg +10202.jpg +14472.jpg +25818.jpg +24034.jpg +5933.jpg +26495.jpg +13238.jpg +9124.jpg +22400.jpg +23623.jpg +26847.jpg +11461.jpg +891.jpg +7921.jpg +29962.jpg +24994.jpg +27229.jpg +6276.jpg +7067.jpg +29453.jpg +28676.jpg +9860.jpg +712.jpg +2867.jpg +13423.jpg +27676.jpg +20057.jpg +29363.jpg +19500.jpg +20595.jpg +19695.jpg +1092.jpg +2370.jpg +1523.jpg +17654.jpg +8745.jpg +2898.jpg +21908.jpg +16850.jpg +27916.jpg +28850.jpg +4968.jpg +26840.jpg +422.jpg +1567.jpg +10630.jpg +8700.jpg +433.jpg +27940.jpg +5226.jpg +23736.jpg +26273.jpg +20076.jpg +5257.jpg +16171.jpg +22350.jpg +6658.jpg +25464.jpg +7422.jpg +24162.jpg +734.jpg +12567.jpg +6071.jpg +16204.jpg +15794.jpg +18442.jpg +12242.jpg +8471.jpg +6051.jpg +25947.jpg +1924.jpg +26053.jpg +19230.jpg +10320.jpg +26511.jpg +1346.jpg +5462.jpg +1616.jpg +24474.jpg +791.jpg +26529.jpg +23916.jpg +9119.jpg +8676.jpg +17046.jpg +2754.jpg +9191.jpg +9561.jpg +2208.jpg +21013.jpg +2050.jpg +26808.jpg +9466.jpg +21237.jpg +11906.jpg +17666.jpg +7826.jpg +18620.jpg +19175.jpg +10597.jpg +18607.jpg +29170.jpg +10452.jpg +10107.jpg +27203.jpg +4782.jpg +14665.jpg +29206.jpg +5864.jpg +10712.jpg +12201.jpg +1439.jpg +13698.jpg +8410.jpg +8930.jpg +8545.jpg +8943.jpg +5168.jpg +20970.jpg +20190.jpg +10757.jpg +28750.jpg +1618.jpg +20140.jpg +5262.jpg +17851.jpg +3660.jpg +6198.jpg +10590.jpg +27822.jpg +10762.jpg +4566.jpg +961.jpg +28274.jpg +26910.jpg +5821.jpg +1374.jpg +6446.jpg +23794.jpg +21090.jpg +23272.jpg +12132.jpg +4611.jpg +7234.jpg +577.jpg +27155.jpg +29582.jpg +9183.jpg +27437.jpg +11251.jpg +219.jpg +17194.jpg +25525.jpg +6441.jpg +16919.jpg +8695.jpg +1506.jpg +13838.jpg +5617.jpg +24424.jpg +15392.jpg +12692.jpg +5384.jpg +15030.jpg +6119.jpg +389.jpg +7948.jpg +1936.jpg +14733.jpg +2345.jpg +1160.jpg +15255.jpg +3198.jpg +7833.jpg +11485.jpg +27691.jpg +18203.jpg +18091.jpg +8953.jpg +11793.jpg +2533.jpg +27590.jpg +11525.jpg +23716.jpg +2770.jpg +28212.jpg +6207.jpg +25924.jpg +23806.jpg +13098.jpg +2555.jpg +15674.jpg +19577.jpg +289.jpg +6434.jpg +21001.jpg +9968.jpg +9477.jpg +5872.jpg +24575.jpg +18730.jpg +146.jpg +8678.jpg +28075.jpg +15494.jpg +27775.jpg +16211.jpg +9967.jpg +2332.jpg +4072.jpg +29113.jpg +6788.jpg +25918.jpg +3146.jpg +14104.jpg +24918.jpg +20245.jpg +5065.jpg +393.jpg +25104.jpg +20941.jpg +12829.jpg +29848.jpg +3234.jpg +2364.jpg +19984.jpg +14573.jpg +25572.jpg +29247.jpg +21999.jpg +11852.jpg +23914.jpg +19714.jpg +13652.jpg +1278.jpg +7166.jpg +8111.jpg +26404.jpg +15783.jpg +24519.jpg +16297.jpg +27543.jpg +28303.jpg +7906.jpg +26691.jpg +70.jpg +15663.jpg +9613.jpg +17874.jpg +16710.jpg +28977.jpg +27961.jpg +2302.jpg +7319.jpg +1681.jpg +9633.jpg +20209.jpg +28121.jpg 
+10231.jpg +20667.jpg +2002.jpg +20075.jpg +27976.jpg +1359.jpg +7697.jpg +29878.jpg +9299.jpg +2084.jpg +11192.jpg +15031.jpg +1014.jpg +9525.jpg +17585.jpg +27342.jpg +11720.jpg +24340.jpg +10282.jpg +12640.jpg +20838.jpg +8074.jpg +26422.jpg +27268.jpg +7654.jpg +12161.jpg +4172.jpg +3677.jpg +27315.jpg +8739.jpg +4639.jpg +1837.jpg +2716.jpg +28112.jpg +12318.jpg +27542.jpg +10236.jpg +29297.jpg +23312.jpg +26633.jpg +28350.jpg +29431.jpg +23924.jpg +6570.jpg +1575.jpg +10576.jpg +25038.jpg +18799.jpg +8894.jpg +24648.jpg +21075.jpg +16616.jpg +12041.jpg +21319.jpg +29758.jpg +2277.jpg +27044.jpg +19723.jpg +16167.jpg +2535.jpg +9117.jpg +2841.jpg +28405.jpg +10363.jpg +8621.jpg +13672.jpg +22737.jpg +28872.jpg +29056.jpg +24617.jpg +2771.jpg +23332.jpg +27877.jpg +26883.jpg +22482.jpg +7945.jpg +10838.jpg +24960.jpg +11952.jpg +19542.jpg +27747.jpg +19403.jpg +13521.jpg +28968.jpg +3174.jpg +21630.jpg +2131.jpg +5799.jpg +16281.jpg +5279.jpg +7529.jpg +27013.jpg +3764.jpg +20063.jpg +29303.jpg +5483.jpg +10690.jpg +28060.jpg +29125.jpg +17106.jpg +16722.jpg +20310.jpg +21450.jpg +25770.jpg +23870.jpg +14708.jpg +7804.jpg +4667.jpg +14084.jpg +22919.jpg +11497.jpg +2463.jpg +29656.jpg +16240.jpg +26153.jpg +24398.jpg +17239.jpg +10566.jpg +29672.jpg +7205.jpg +28609.jpg +13994.jpg +18049.jpg +26071.jpg +26705.jpg +12726.jpg +7108.jpg +17788.jpg +6130.jpg +25836.jpg +13293.jpg +2407.jpg +522.jpg +16688.jpg +21419.jpg +24774.jpg +22581.jpg +3053.jpg +5056.jpg +8580.jpg +13764.jpg +1390.jpg +9239.jpg +14292.jpg +2625.jpg +774.jpg +21977.jpg +27316.jpg +16242.jpg +5343.jpg +20498.jpg +7261.jpg +371.jpg +8942.jpg +1868.jpg +5005.jpg +15.jpg +15858.jpg +29983.jpg +24335.jpg +14655.jpg +10052.jpg +13676.jpg +8498.jpg +20186.jpg +13298.jpg +135.jpg +13881.jpg +746.jpg +7928.jpg +29328.jpg +23018.jpg +21793.jpg +25805.jpg +10534.jpg +18912.jpg +5812.jpg +9141.jpg +2714.jpg +29703.jpg +16126.jpg +29060.jpg +17441.jpg +4052.jpg +22271.jpg +23900.jpg +12012.jpg +22911.jpg +18237.jpg +29005.jpg +27919.jpg +28216.jpg +24770.jpg +20807.jpg +28761.jpg +9288.jpg +2782.jpg +4513.jpg +27829.jpg +8063.jpg +8441.jpg +3780.jpg +290.jpg +18387.jpg +3232.jpg +10059.jpg +23132.jpg +25931.jpg +20835.jpg +13074.jpg +6801.jpg +27528.jpg +614.jpg +10094.jpg +1790.jpg +6686.jpg +3959.jpg +19379.jpg +4091.jpg +9660.jpg +14212.jpg +10631.jpg +22957.jpg +22178.jpg +28449.jpg +2947.jpg +18349.jpg +10843.jpg +7359.jpg +22198.jpg +23710.jpg +15512.jpg +9948.jpg +4476.jpg +1270.jpg +29352.jpg +5739.jpg +29157.jpg +14260.jpg +23478.jpg +26300.jpg +19289.jpg +26159.jpg +8670.jpg +18849.jpg +9541.jpg +24914.jpg +13875.jpg +20845.jpg +16158.jpg +10835.jpg +10932.jpg +18908.jpg +28678.jpg +29924.jpg +9725.jpg +4963.jpg +21898.jpg +7059.jpg +3495.jpg +246.jpg +1114.jpg +17967.jpg +27261.jpg +23952.jpg +865.jpg +4842.jpg +8993.jpg +17853.jpg +29671.jpg +6417.jpg +15435.jpg +3642.jpg +27891.jpg +17992.jpg +13515.jpg +9618.jpg +12334.jpg +20803.jpg +29437.jpg +16122.jpg +28834.jpg +5231.jpg +11273.jpg +12180.jpg +24458.jpg +27289.jpg +18308.jpg +2920.jpg +14350.jpg +8862.jpg +14279.jpg +17814.jpg +28099.jpg +7505.jpg +28538.jpg +16928.jpg +8075.jpg +6342.jpg +22950.jpg +19043.jpg +1948.jpg +14482.jpg +14072.jpg +4499.jpg +443.jpg +29149.jpg +26934.jpg +21594.jpg +5048.jpg +15048.jpg +4977.jpg +3957.jpg +12393.jpg +2766.jpg +411.jpg +21886.jpg +10959.jpg +16805.jpg +17086.jpg +5719.jpg +4300.jpg +28663.jpg +24730.jpg +10233.jpg +12511.jpg +3916.jpg +4168.jpg +1768.jpg +5592.jpg +29346.jpg +12838.jpg +11032.jpg +24385.jpg +6255.jpg 
+14328.jpg +9787.jpg +5590.jpg +8564.jpg +17942.jpg +7717.jpg +20425.jpg +15058.jpg +11511.jpg +16542.jpg +28580.jpg +3138.jpg +243.jpg +23333.jpg +1764.jpg +8430.jpg +29831.jpg +6643.jpg +14632.jpg +18636.jpg +27687.jpg +8660.jpg +18566.jpg +14517.jpg +1304.jpg +26819.jpg +3521.jpg +16601.jpg +14139.jpg +3919.jpg +16744.jpg +16093.jpg +27921.jpg +27654.jpg +8890.jpg +24361.jpg +10482.jpg +20298.jpg +18740.jpg +10044.jpg +17052.jpg +13528.jpg +6862.jpg +1008.jpg +10286.jpg +15669.jpg +9411.jpg +27179.jpg +22863.jpg +7244.jpg +24919.jpg +19104.jpg +26179.jpg +22869.jpg +5144.jpg +4117.jpg +23859.jpg +19245.jpg +22318.jpg +20780.jpg +25293.jpg +15838.jpg +20921.jpg +2053.jpg +25937.jpg +26680.jpg +13922.jpg +14494.jpg +12749.jpg +12135.jpg +18505.jpg +26864.jpg +24692.jpg +12937.jpg +24462.jpg +22630.jpg +27418.jpg +3359.jpg +3152.jpg +24324.jpg +17310.jpg +8302.jpg +2036.jpg +26609.jpg +7466.jpg +8097.jpg +26265.jpg +2304.jpg +17122.jpg +22196.jpg +10941.jpg +2500.jpg +7232.jpg +18581.jpg +19750.jpg +123.jpg +4157.jpg +5840.jpg +28427.jpg +1425.jpg +28370.jpg +22668.jpg +4080.jpg +19039.jpg +15388.jpg +11224.jpg +11160.jpg +7841.jpg +23221.jpg +13358.jpg +8766.jpg +20338.jpg +23685.jpg +4613.jpg +25117.jpg +28838.jpg +6954.jpg +11384.jpg +12383.jpg +28754.jpg +13770.jpg +8569.jpg +28818.jpg +29715.jpg +28223.jpg +12847.jpg +20520.jpg +12771.jpg +15973.jpg +447.jpg +21275.jpg +440.jpg +516.jpg +23172.jpg +22301.jpg +25257.jpg +3157.jpg +15121.jpg +32.jpg +11208.jpg +17982.jpg +14624.jpg +20559.jpg +27475.jpg +17416.jpg +7931.jpg +25499.jpg +25905.jpg +7479.jpg +1204.jpg +5565.jpg +23841.jpg +26685.jpg +2643.jpg +18865.jpg +109.jpg +25976.jpg +7257.jpg +25236.jpg +15229.jpg +4870.jpg +18696.jpg +14272.jpg +95.jpg +13863.jpg +18549.jpg +11927.jpg +17617.jpg +4143.jpg +29061.jpg +357.jpg +12663.jpg +24673.jpg +12159.jpg +22775.jpg +7193.jpg +17400.jpg +1942.jpg +24368.jpg +14414.jpg +25341.jpg +1731.jpg +15849.jpg +25891.jpg +17780.jpg +24256.jpg +9910.jpg +10650.jpg +3974.jpg +17733.jpg +24615.jpg +25800.jpg +27260.jpg +11745.jpg +26595.jpg +26534.jpg +10037.jpg +13948.jpg +29187.jpg +16666.jpg +4495.jpg +18081.jpg +6040.jpg +23980.jpg +18278.jpg +25522.jpg +15866.jpg +28345.jpg +17383.jpg +6941.jpg +23773.jpg +17113.jpg +17835.jpg +29982.jpg +5611.jpg +23704.jpg +13072.jpg +21405.jpg +7799.jpg +9284.jpg +14700.jpg +17099.jpg +16393.jpg +416.jpg +8473.jpg +3654.jpg +23819.jpg +20432.jpg +5717.jpg +15576.jpg +26112.jpg +3303.jpg +9610.jpg +23553.jpg +10736.jpg +23351.jpg +15712.jpg +17802.jpg +9029.jpg +28721.jpg +16217.jpg +24412.jpg +10883.jpg +5014.jpg +4375.jpg +4814.jpg +13096.jpg +20693.jpg +11105.jpg +28629.jpg +2231.jpg +6189.jpg +23922.jpg +11476.jpg +27817.jpg +3819.jpg +19365.jpg +14761.jpg +1846.jpg +13410.jpg +3466.jpg +12068.jpg +7723.jpg +8119.jpg +10927.jpg +13508.jpg +1034.jpg +29108.jpg +22738.jpg +10102.jpg +14605.jpg +6749.jpg +5143.jpg +3391.jpg +7842.jpg +883.jpg +4665.jpg +2900.jpg +9564.jpg +20176.jpg +1398.jpg +28899.jpg +25623.jpg +21845.jpg +11141.jpg +22990.jpg +27870.jpg +1910.jpg +23127.jpg +51.jpg +7824.jpg +29179.jpg +21028.jpg +24518.jpg +16597.jpg +23414.jpg +16630.jpg +21208.jpg +8677.jpg +21946.jpg +15694.jpg +28276.jpg +10349.jpg +16739.jpg +19368.jpg +11568.jpg +16715.jpg +15179.jpg +23429.jpg +15475.jpg +3034.jpg +17071.jpg +928.jpg +13473.jpg +16366.jpg +18165.jpg +9808.jpg +17010.jpg +22538.jpg +28710.jpg +15035.jpg +27613.jpg +441.jpg +6037.jpg +25797.jpg +15796.jpg +1685.jpg +5599.jpg +5322.jpg +22972.jpg +13696.jpg +3164.jpg +3071.jpg +11844.jpg 
+8167.jpg +24036.jpg +21725.jpg +11074.jpg +11579.jpg +7007.jpg +21461.jpg +8791.jpg +25865.jpg +20312.jpg +20969.jpg +23828.jpg +11538.jpg +22416.jpg +6165.jpg +22567.jpg +12410.jpg +29953.jpg +2572.jpg +13885.jpg +5061.jpg +18586.jpg +20832.jpg +24264.jpg +13045.jpg +9085.jpg +2697.jpg +11895.jpg +18385.jpg +28215.jpg +29812.jpg +29898.jpg +24273.jpg +24098.jpg +26183.jpg +1046.jpg +7386.jpg +23531.jpg +22638.jpg +28705.jpg +13494.jpg +9319.jpg +16147.jpg +20041.jpg +1268.jpg +6115.jpg +11768.jpg +2815.jpg +1539.jpg +25620.jpg +16346.jpg +20308.jpg +12218.jpg +8864.jpg +26531.jpg +11934.jpg +561.jpg +15701.jpg +27110.jpg +7307.jpg +7960.jpg +4112.jpg +16326.jpg +4213.jpg +240.jpg +17681.jpg +20488.jpg +7226.jpg +1354.jpg +28302.jpg +10426.jpg +3628.jpg +25846.jpg +4368.jpg +10907.jpg +22517.jpg +22138.jpg +13431.jpg +4784.jpg +25101.jpg +11063.jpg +22459.jpg +11987.jpg +9501.jpg +16081.jpg +28740.jpg +16108.jpg +29942.jpg +8133.jpg +17829.jpg +26941.jpg +22115.jpg +7167.jpg +23635.jpg +7382.jpg +9438.jpg +11050.jpg +16953.jpg +3217.jpg +15869.jpg +8852.jpg +7805.jpg +22915.jpg +20915.jpg +6355.jpg +20418.jpg +18621.jpg +4123.jpg +2150.jpg +27306.jpg +430.jpg +13852.jpg +17686.jpg +2755.jpg +2637.jpg +10148.jpg +14040.jpg +17327.jpg +2013.jpg +21837.jpg +28167.jpg +16644.jpg +505.jpg +24688.jpg +21932.jpg +26760.jpg +29718.jpg +3583.jpg +22929.jpg +16763.jpg +802.jpg +18302.jpg +26803.jpg +7377.jpg +9594.jpg +11995.jpg +588.jpg +13375.jpg +1173.jpg +14447.jpg +20026.jpg +7583.jpg +17304.jpg +13270.jpg +12972.jpg +18788.jpg +17611.jpg +15974.jpg +21117.jpg +25717.jpg +24680.jpg +3560.jpg +1100.jpg +22410.jpg +7896.jpg +24056.jpg +29250.jpg +26719.jpg +23415.jpg +8490.jpg +1087.jpg +16098.jpg +14327.jpg +23738.jpg +26658.jpg +5393.jpg +26732.jpg +6944.jpg +5327.jpg +397.jpg +7042.jpg +18854.jpg +3118.jpg +2573.jpg +8903.jpg +8638.jpg +24330.jpg +27642.jpg +4726.jpg +2247.jpg +23110.jpg +15684.jpg +15564.jpg +1791.jpg +2542.jpg +10988.jpg +6648.jpg +10373.jpg +15335.jpg +5104.jpg +5050.jpg +3211.jpg +15106.jpg +17273.jpg +13554.jpg +26731.jpg +1852.jpg +28837.jpg +16019.jpg +4103.jpg +16774.jpg +14342.jpg +24405.jpg +8609.jpg +29526.jpg +9928.jpg +2303.jpg +25588.jpg +16102.jpg +5249.jpg +16096.jpg +8859.jpg +26257.jpg +2915.jpg +28311.jpg +17961.jpg +22760.jpg +14097.jpg +11645.jpg +16196.jpg +22413.jpg +20355.jpg +11685.jpg +17998.jpg +13777.jpg +27968.jpg +7128.jpg +1348.jpg +7384.jpg +3709.jpg +28669.jpg +26125.jpg +18016.jpg +20659.jpg +5018.jpg +29397.jpg +1786.jpg +25276.jpg +19975.jpg +7506.jpg +16522.jpg +8901.jpg +11912.jpg +9184.jpg +27286.jpg +7546.jpg +22936.jpg +17933.jpg +12536.jpg +17189.jpg +619.jpg +24561.jpg +28022.jpg +18376.jpg +13160.jpg +26478.jpg +14511.jpg +6758.jpg +12156.jpg +8258.jpg +10225.jpg +4477.jpg +5998.jpg +25845.jpg +10203.jpg +7919.jpg +20027.jpg +11316.jpg +27082.jpg +21228.jpg +22166.jpg +28878.jpg +1405.jpg +24935.jpg +14013.jpg +509.jpg +15215.jpg +20043.jpg +6362.jpg +28327.jpg +29556.jpg +16485.jpg +15092.jpg +6612.jpg +11335.jpg +4273.jpg +22180.jpg +11670.jpg +8764.jpg +8121.jpg +7527.jpg +22232.jpg +24557.jpg +28544.jpg +27134.jpg +29915.jpg +20825.jpg +18546.jpg +28487.jpg +16510.jpg +13717.jpg +5675.jpg +8694.jpg +14614.jpg +5764.jpg +24656.jpg +20324.jpg +25765.jpg +16470.jpg +340.jpg +5073.jpg +16219.jpg +5447.jpg +21649.jpg +14971.jpg +15508.jpg +28812.jpg +13766.jpg +12083.jpg +1680.jpg +18351.jpg +19391.jpg +16746.jpg +1231.jpg +18032.jpg +4024.jpg +8516.jpg +11609.jpg +21807.jpg +10396.jpg +9057.jpg +25156.jpg +6510.jpg +18102.jpg 
+2309.jpg +28687.jpg +13343.jpg +7871.jpg +11440.jpg +23027.jpg +21492.jpg +17041.jpg +23961.jpg +22099.jpg +1856.jpg +28625.jpg +6472.jpg +24358.jpg +20753.jpg +708.jpg +27323.jpg +21690.jpg +13428.jpg +14566.jpg +11464.jpg +18739.jpg +13320.jpg +13192.jpg +21487.jpg +24467.jpg +11928.jpg +5807.jpg +8216.jpg +5932.jpg +3324.jpg +13408.jpg +1890.jpg +25625.jpg +25151.jpg +6714.jpg +25457.jpg +24415.jpg +16402.jpg +29743.jpg +925.jpg +19471.jpg +10041.jpg +12811.jpg +25243.jpg +19295.jpg +11614.jpg +28666.jpg +24923.jpg +19780.jpg +20937.jpg +22435.jpg +29181.jpg +5162.jpg +8309.jpg +22249.jpg +18595.jpg +2404.jpg +22516.jpg +19006.jpg +25654.jpg +15638.jpg +29116.jpg +13362.jpg +10412.jpg +3374.jpg +25148.jpg +5873.jpg +20583.jpg +10209.jpg +18138.jpg +23136.jpg +21160.jpg +4155.jpg +20292.jpg +25191.jpg +18956.jpg +9487.jpg +567.jpg +17495.jpg +9321.jpg +26889.jpg +4601.jpg +9147.jpg +5742.jpg +6607.jpg +17045.jpg +13482.jpg +14308.jpg +3491.jpg +15660.jpg +15194.jpg +1080.jpg +14289.jpg +11010.jpg +4475.jpg +27069.jpg +579.jpg +14493.jpg +8405.jpg +6175.jpg +7509.jpg +22058.jpg +3611.jpg +18712.jpg +8727.jpg +26319.jpg +6921.jpg +1061.jpg +23367.jpg +18496.jpg +13112.jpg +23124.jpg +2106.jpg +7800.jpg +24552.jpg +3867.jpg +10086.jpg +24165.jpg +23365.jpg +16920.jpg +18805.jpg +15696.jpg +25945.jpg +29625.jpg +24534.jpg +8827.jpg +5564.jpg +25202.jpg +2687.jpg +25492.jpg +26358.jpg +7311.jpg +12810.jpg +10124.jpg +11769.jpg +29681.jpg +9684.jpg +5121.jpg +16031.jpg +5555.jpg +29335.jpg +4371.jpg +8572.jpg +7412.jpg +12618.jpg +9628.jpg +3722.jpg +29649.jpg +10416.jpg +19021.jpg +28069.jpg +12322.jpg +5380.jpg +20428.jpg +18427.jpg +26302.jpg +14330.jpg +13707.jpg +11680.jpg +9203.jpg +8657.jpg +6602.jpg +7688.jpg +15994.jpg +6712.jpg +20618.jpg +29588.jpg +2029.jpg +13811.jpg +4085.jpg +10934.jpg +18769.jpg +14895.jpg +13636.jpg +17657.jpg +22214.jpg +21009.jpg +326.jpg +7976.jpg +29177.jpg +6313.jpg +6321.jpg +18422.jpg +2608.jpg +11858.jpg +28083.jpg +13581.jpg +4270.jpg +1582.jpg +27424.jpg +18906.jpg +4684.jpg diff --git a/lama/fetch_data/val_shuffled.flist b/lama/fetch_data/val_shuffled.flist new file mode 100644 index 0000000000000000000000000000000000000000..0e4aeb451bfd6c8c6191252f42dc253ece16238b --- /dev/null +++ b/lama/fetch_data/val_shuffled.flist @@ -0,0 +1,2000 @@ +25531.jpg +15329.jpg +23340.jpg +29014.jpg +29920.jpg +193.jpg +24466.jpg +29690.jpg +27615.jpg +3813.jpg +25896.jpg +27553.jpg +5825.jpg +16241.jpg +8748.jpg +14401.jpg +26190.jpg +2806.jpg +28502.jpg +22740.jpg +6243.jpg +20684.jpg +4633.jpg +18533.jpg +5975.jpg +3011.jpg +9515.jpg +21015.jpg +2675.jpg +27938.jpg +4139.jpg +5264.jpg +19139.jpg +22291.jpg +13532.jpg +22967.jpg +16879.jpg +14858.jpg +13361.jpg +20503.jpg +4918.jpg +7673.jpg +24784.jpg +9804.jpg +29194.jpg +14571.jpg +25455.jpg +20215.jpg +22844.jpg +10310.jpg +11117.jpg +6423.jpg +24124.jpg +5334.jpg +14209.jpg +10580.jpg +20448.jpg +25933.jpg +10475.jpg +29401.jpg +14880.jpg +29572.jpg +24299.jpg +14849.jpg +15898.jpg +26683.jpg +27901.jpg +22515.jpg +14501.jpg +4482.jpg +12281.jpg +15377.jpg +3588.jpg +2624.jpg +7830.jpg +28490.jpg +18928.jpg +18354.jpg +5509.jpg +28113.jpg +25761.jpg +14300.jpg +20736.jpg +23043.jpg +11544.jpg +7427.jpg +9484.jpg +4699.jpg +2323.jpg +7464.jpg +6133.jpg +22237.jpg +18272.jpg +7266.jpg +19435.jpg +10453.jpg +1668.jpg +9416.jpg +29836.jpg +22086.jpg +22660.jpg +10523.jpg +13830.jpg +4271.jpg +26168.jpg +26149.jpg +27779.jpg +19789.jpg +17847.jpg +15977.jpg +3111.jpg +20933.jpg +4358.jpg +13418.jpg +899.jpg 
+25720.jpg +20882.jpg +28191.jpg +15554.jpg +14915.jpg +6998.jpg +8137.jpg +13849.jpg +26971.jpg +22313.jpg +11590.jpg +5464.jpg +18972.jpg +25224.jpg +3629.jpg +12138.jpg +16128.jpg +16117.jpg +21457.jpg +23551.jpg +6913.jpg +5430.jpg +27417.jpg +2623.jpg +19216.jpg +24082.jpg +19648.jpg +9472.jpg +21163.jpg +26236.jpg +10361.jpg +12007.jpg +4202.jpg +15381.jpg +13787.jpg +7532.jpg +18609.jpg +18061.jpg +14218.jpg +20203.jpg +18705.jpg +1677.jpg +16723.jpg +963.jpg +22129.jpg +282.jpg +27726.jpg +19544.jpg +3346.jpg +10322.jpg +22343.jpg +19820.jpg +9857.jpg +21429.jpg +7572.jpg +1306.jpg +2093.jpg +4598.jpg +26177.jpg +9056.jpg +356.jpg +4671.jpg +6431.jpg +15401.jpg +13996.jpg +28413.jpg +22883.jpg +23445.jpg +28516.jpg +15707.jpg +5553.jpg +16377.jpg +29755.jpg +14616.jpg +4220.jpg +22238.jpg +18952.jpg +4430.jpg +29545.jpg +28783.jpg +15916.jpg +3460.jpg +18230.jpg +20135.jpg +11333.jpg +7491.jpg +29011.jpg +12713.jpg +24662.jpg +5401.jpg +26238.jpg +25821.jpg +28300.jpg +13740.jpg +11859.jpg +8160.jpg +22532.jpg +15430.jpg +18362.jpg +20993.jpg +8384.jpg +286.jpg +21483.jpg +2673.jpg +28270.jpg +28232.jpg +8331.jpg +29215.jpg +3524.jpg +7254.jpg +6949.jpg +10772.jpg +467.jpg +15709.jpg +28840.jpg +13808.jpg +24862.jpg +21129.jpg +24070.jpg +29012.jpg +11463.jpg +25778.jpg +11256.jpg +21134.jpg +19588.jpg +11854.jpg +25938.jpg +21367.jpg +11202.jpg +14263.jpg +5094.jpg +16302.jpg +8830.jpg +9914.jpg +29900.jpg +12763.jpg +14175.jpg +26744.jpg +19563.jpg +6399.jpg +17558.jpg +6422.jpg +16078.jpg +10228.jpg +29348.jpg +13219.jpg +12784.jpg +23063.jpg +13171.jpg +756.jpg +7418.jpg +1453.jpg +17448.jpg +9686.jpg +8374.jpg +8182.jpg +120.jpg +23305.jpg +18784.jpg +738.jpg +6833.jpg +20808.jpg +13585.jpg +25789.jpg +4947.jpg +20558.jpg +20232.jpg +11583.jpg +29018.jpg +1721.jpg +24428.jpg +12097.jpg +9280.jpg +12754.jpg +29081.jpg +25676.jpg +1023.jpg +17688.jpg +2167.jpg +4792.jpg +25381.jpg +19925.jpg +16678.jpg +3046.jpg +25252.jpg +1695.jpg +18404.jpg +27455.jpg +13691.jpg +27845.jpg +7334.jpg +25082.jpg +14996.jpg +22447.jpg +29347.jpg +9413.jpg +22308.jpg +26559.jpg +20881.jpg +3564.jpg +26643.jpg +28243.jpg +17999.jpg +26505.jpg +677.jpg +28219.jpg +17818.jpg +9004.jpg +7249.jpg +16685.jpg +10047.jpg +13303.jpg +18929.jpg +19734.jpg +28670.jpg +22307.jpg +13178.jpg +172.jpg +5341.jpg +7401.jpg +15288.jpg +23114.jpg +28885.jpg +15279.jpg +22724.jpg +18288.jpg +7453.jpg +2843.jpg +27171.jpg +22622.jpg +14886.jpg +10154.jpg +17083.jpg +1913.jpg +13228.jpg +13530.jpg +18443.jpg +24159.jpg +15404.jpg +8916.jpg +22629.jpg +7719.jpg +9653.jpg +9609.jpg +1287.jpg +29216.jpg +21906.jpg +11045.jpg +7118.jpg +18580.jpg +16560.jpg +22027.jpg +12133.jpg +17932.jpg +13230.jpg +22613.jpg +10241.jpg +174.jpg +11495.jpg +15549.jpg +3293.jpg +29869.jpg +21803.jpg +25196.jpg +15272.jpg +20214.jpg +9175.jpg +7568.jpg +24834.jpg +7540.jpg +11819.jpg +17506.jpg +23675.jpg +15816.jpg +12506.jpg +6857.jpg +12457.jpg +12144.jpg +19015.jpg +29024.jpg +19876.jpg +18940.jpg +25853.jpg +22952.jpg +8640.jpg +81.jpg +26222.jpg +22492.jpg +1320.jpg +549.jpg +23981.jpg +22757.jpg +16025.jpg +4764.jpg +3442.jpg +22645.jpg +2538.jpg +9923.jpg +17080.jpg +27639.jpg +7757.jpg +1865.jpg +11702.jpg +9988.jpg +11429.jpg +24046.jpg +7182.jpg +26538.jpg +22101.jpg +16113.jpg +5721.jpg +14.jpg +17520.jpg +12064.jpg +2669.jpg +26202.jpg +12678.jpg +25649.jpg +23368.jpg +28016.jpg +9023.jpg +15266.jpg +23947.jpg +20593.jpg +22652.jpg +16595.jpg +1389.jpg +6812.jpg +16566.jpg +3424.jpg +22431.jpg +1082.jpg +3361.jpg +13932.jpg 
+3239.jpg +468.jpg +2183.jpg +7139.jpg +24592.jpg +17391.jpg +20416.jpg +10713.jpg +22022.jpg +436.jpg +24084.jpg +29142.jpg +8378.jpg +11537.jpg +23782.jpg +21782.jpg +17535.jpg +27709.jpg +5538.jpg +17420.jpg +21676.jpg +5254.jpg +22653.jpg +26492.jpg +26919.jpg +29521.jpg +19681.jpg +27223.jpg +23891.jpg +5179.jpg +5942.jpg +17302.jpg +22080.jpg +24780.jpg +6248.jpg +1481.jpg +19027.jpg +27960.jpg +1579.jpg +11347.jpg +12547.jpg +21892.jpg +7874.jpg +1235.jpg +17921.jpg +5732.jpg +24651.jpg +27395.jpg +11782.jpg +2366.jpg +479.jpg +12541.jpg +448.jpg +18664.jpg +3022.jpg +17180.jpg +8967.jpg +14807.jpg +24247.jpg +13969.jpg +10666.jpg +24827.jpg +11426.jpg +24801.jpg +2626.jpg +3779.jpg +17734.jpg +16111.jpg +29410.jpg +5643.jpg +19276.jpg +28713.jpg +3343.jpg +23373.jpg +26466.jpg +4991.jpg +6027.jpg +20608.jpg +3458.jpg +7659.jpg +17241.jpg +23588.jpg +2548.jpg +26252.jpg +14319.jpg +12326.jpg +23014.jpg +23604.jpg +2076.jpg +28888.jpg +5993.jpg +29887.jpg +26135.jpg +3663.jpg +26203.jpg +18141.jpg +28739.jpg +14286.jpg +4429.jpg +11087.jpg +17513.jpg +14415.jpg +21180.jpg +22116.jpg +14956.jpg +10252.jpg +15460.jpg +22335.jpg +5097.jpg +13771.jpg +8613.jpg +17945.jpg +5024.jpg +20217.jpg +23691.jpg +28532.jpg +21178.jpg +21904.jpg +6117.jpg +5159.jpg +9533.jpg +19074.jpg +27734.jpg +22405.jpg +10853.jpg +14240.jpg +10656.jpg +24081.jpg +4047.jpg +24379.jpg +7247.jpg +25587.jpg +4777.jpg +26932.jpg +1468.jpg +26401.jpg +590.jpg +4278.jpg +15839.jpg +7161.jpg +21543.jpg +2804.jpg +24469.jpg +24139.jpg +18660.jpg +2137.jpg +24609.jpg +4461.jpg +16673.jpg +22692.jpg +23594.jpg +9236.jpg +11824.jpg +17465.jpg +24999.jpg +638.jpg +23385.jpg +764.jpg +4717.jpg +5465.jpg +9629.jpg +6182.jpg +22388.jpg +2664.jpg +5571.jpg +24909.jpg +17306.jpg +12973.jpg +818.jpg +28936.jpg +12342.jpg +28101.jpg +17345.jpg +21149.jpg +8083.jpg +16468.jpg +19830.jpg +1927.jpg +1554.jpg +22992.jpg +18134.jpg +23911.jpg +21941.jpg +21871.jpg +7828.jpg +25560.jpg +9631.jpg +18001.jpg +15423.jpg +11798.jpg +21707.jpg +9235.jpg +5630.jpg +22095.jpg +6549.jpg +9676.jpg +14546.jpg +25773.jpg +9294.jpg +24322.jpg +27751.jpg +24260.jpg +25799.jpg +845.jpg +24679.jpg +5284.jpg +18050.jpg +19828.jpg +20362.jpg +15876.jpg +28912.jpg +17303.jpg +26738.jpg +21585.jpg +8999.jpg +7317.jpg +2539.jpg +20372.jpg +24456.jpg +23552.jpg +27183.jpg +22389.jpg +7223.jpg +19055.jpg +12982.jpg +19478.jpg +643.jpg +3267.jpg +26195.jpg +7574.jpg +22295.jpg +3837.jpg +24897.jpg +3733.jpg +10328.jpg +11049.jpg +29357.jpg +23448.jpg +7019.jpg +11533.jpg +20732.jpg +23677.jpg +7138.jpg +24074.jpg +13256.jpg +26778.jpg +16966.jpg +22481.jpg +13330.jpg +8087.jpg +26310.jpg +13574.jpg +16716.jpg +8919.jpg +25432.jpg +1598.jpg +1358.jpg +28377.jpg +21416.jpg +26576.jpg +9316.jpg +17838.jpg +16924.jpg +15176.jpg +12327.jpg +1488.jpg +20981.jpg +1086.jpg +12617.jpg +26182.jpg +5492.jpg +13014.jpg +23080.jpg +23999.jpg +18689.jpg +17330.jpg +12477.jpg +29045.jpg +16419.jpg +6459.jpg +22605.jpg +17518.jpg +8386.jpg +29428.jpg +15181.jpg +10156.jpg +18419.jpg +26698.jpg +25678.jpg +1249.jpg +19943.jpg +5747.jpg +24409.jpg +13388.jpg +24077.jpg +21781.jpg +10288.jpg +7489.jpg +25634.jpg +3836.jpg +16512.jpg +17040.jpg +22890.jpg +6171.jpg +24588.jpg +17755.jpg +20649.jpg +26003.jpg +27583.jpg +12175.jpg +29636.jpg +13122.jpg +12230.jpg +20668.jpg +21200.jpg +25556.jpg +869.jpg +13487.jpg +7657.jpg +6440.jpg +17259.jpg +2358.jpg +1547.jpg +19394.jpg +29637.jpg +27120.jpg +27951.jpg +18532.jpg +29123.jpg +4988.jpg +1470.jpg +18615.jpg +28181.jpg 
+16373.jpg +7775.jpg +27419.jpg +12302.jpg +5278.jpg +29535.jpg +11718.jpg +16273.jpg +26384.jpg +26132.jpg +28023.jpg +26232.jpg +6682.jpg +9718.jpg +14568.jpg +17946.jpg +29893.jpg +8587.jpg +12725.jpg +10733.jpg +27169.jpg +14404.jpg +2020.jpg +8696.jpg +6181.jpg +13046.jpg +16339.jpg +24055.jpg +11450.jpg +23468.jpg +9996.jpg +878.jpg +28047.jpg +17683.jpg +26515.jpg +19777.jpg +24580.jpg +4184.jpg +7480.jpg +16228.jpg +4877.jpg +14759.jpg +15393.jpg +27378.jpg +14801.jpg +16504.jpg +18933.jpg +25545.jpg +19196.jpg +17663.jpg +922.jpg +3644.jpg +18088.jpg +8654.jpg +3403.jpg +2914.jpg +25511.jpg +17233.jpg +6009.jpg +13305.jpg +25510.jpg +26277.jpg +22026.jpg +1551.jpg +22735.jpg +6191.jpg +11577.jpg +14725.jpg +12953.jpg +7922.jpg +22628.jpg +19689.jpg +10359.jpg +15015.jpg +24333.jpg +5865.jpg +7957.jpg +12085.jpg +4747.jpg +7538.jpg +11877.jpg +8767.jpg +788.jpg +5369.jpg +14385.jpg +266.jpg +6813.jpg +12174.jpg +13001.jpg +26958.jpg +22683.jpg +2808.jpg +3004.jpg +20449.jpg +23690.jpg +4331.jpg +27477.jpg +23254.jpg +13851.jpg +4373.jpg +14205.jpg +10451.jpg +17431.jpg +22727.jpg +2887.jpg +28892.jpg +24638.jpg +24058.jpg +13695.jpg +9730.jpg +7972.jpg +8310.jpg +18399.jpg +5778.jpg +23627.jpg +23147.jpg +9993.jpg +166.jpg +4993.jpg +28488.jpg +19031.jpg +9833.jpg +28178.jpg +17740.jpg +28341.jpg +8892.jpg +6032.jpg +17509.jpg +13166.jpg +4758.jpg +6573.jpg +14154.jpg +23006.jpg +1524.jpg +2591.jpg +14790.jpg +8304.jpg +22182.jpg +8217.jpg +19622.jpg +7913.jpg +8315.jpg +20723.jpg +18266.jpg +19763.jpg +14585.jpg +27248.jpg +9126.jpg +26102.jpg +1700.jpg +3557.jpg +19608.jpg +25751.jpg +87.jpg +6438.jpg +14119.jpg +22571.jpg +23279.jpg +19628.jpg +19909.jpg +2616.jpg +4114.jpg +9678.jpg +13780.jpg +8292.jpg +6147.jpg +12291.jpg +16778.jpg +8606.jpg +13592.jpg +5203.jpg +19992.jpg +19505.jpg +24120.jpg +8765.jpg +24438.jpg +16792.jpg +19745.jpg +23827.jpg +3761.jpg +7638.jpg +10568.jpg +14808.jpg +5893.jpg +29132.jpg +8514.jpg +13558.jpg +4268.jpg +19637.jpg +26957.jpg +15583.jpg +15080.jpg +10814.jpg +18275.jpg +5060.jpg +21788.jpg +11193.jpg +1710.jpg +5169.jpg +13697.jpg +13038.jpg +7939.jpg +3781.jpg +14491.jpg +28128.jpg +23933.jpg +22711.jpg +5477.jpg +23971.jpg +7297.jpg +17067.jpg +8602.jpg +26918.jpg +4111.jpg +20848.jpg +7958.jpg +15687.jpg +14755.jpg +15878.jpg +4180.jpg +23982.jpg +9489.jpg +25670.jpg +23751.jpg +4071.jpg +6073.jpg +15220.jpg +24631.jpg +1409.jpg +16137.jpg +11127.jpg +26911.jpg +9262.jpg +26337.jpg +7640.jpg +25125.jpg +25334.jpg +25502.jpg +16087.jpg +8347.jpg +21321.jpg +3251.jpg +28155.jpg +29267.jpg +4005.jpg +14660.jpg +29726.jpg +21775.jpg +5202.jpg +3966.jpg +15083.jpg +9937.jpg +7807.jpg +21705.jpg +12260.jpg +23164.jpg +11699.jpg +13537.jpg +7905.jpg +20375.jpg +16795.jpg +929.jpg +10918.jpg +9756.jpg +5109.jpg +2960.jpg +27358.jpg +12112.jpg +27495.jpg +5167.jpg +4272.jpg +4212.jpg +6333.jpg +25433.jpg +20138.jpg +21363.jpg +27988.jpg +25083.jpg +19899.jpg +12856.jpg +2479.jpg +18714.jpg +19282.jpg +18744.jpg +3145.jpg +7472.jpg +13736.jpg +6443.jpg +24551.jpg +27865.jpg +27860.jpg +13556.jpg +16469.jpg +29050.jpg +25431.jpg +8887.jpg +27391.jpg +25121.jpg +5490.jpg +12859.jpg +4468.jpg +26474.jpg +9102.jpg +9667.jpg +28903.jpg +17228.jpg +17222.jpg +18039.jpg +75.jpg +3881.jpg +22786.jpg +13427.jpg +9396.jpg +4388.jpg +16654.jpg +16835.jpg +13735.jpg +1248.jpg +19746.jpg +10827.jpg +8040.jpg +13679.jpg +28889.jpg +29687.jpg +4735.jpg +25064.jpg +12711.jpg +7259.jpg +19156.jpg +21253.jpg +20955.jpg +20831.jpg +27950.jpg +29901.jpg +29294.jpg 
+13910.jpg +1541.jpg +9427.jpg +24736.jpg +3758.jpg +13609.jpg +29220.jpg +23643.jpg +21177.jpg +23160.jpg +11821.jpg +10611.jpg +23089.jpg +24684.jpg +2222.jpg +3592.jpg +18824.jpg +19575.jpg +14583.jpg +27463.jpg +2848.jpg +26628.jpg +8491.jpg +26298.jpg +8225.jpg +8231.jpg +20191.jpg +7316.jpg +9719.jpg +24202.jpg +3389.jpg +28049.jpg +19918.jpg +22151.jpg +4102.jpg +3601.jpg +22910.jpg +18701.jpg +19998.jpg +26050.jpg +21139.jpg +8170.jpg +13598.jpg +24489.jpg +29786.jpg +21265.jpg +22106.jpg +23059.jpg +26955.jpg +20497.jpg +8458.jpg +22996.jpg +5445.jpg +23081.jpg +22457.jpg +26158.jpg +21891.jpg +3550.jpg +375.jpg +6239.jpg +20630.jpg +17350.jpg +2333.jpg +24160.jpg +6038.jpg +3799.jpg +26350.jpg +15416.jpg +2433.jpg +11519.jpg +8403.jpg +1137.jpg +7069.jpg +7849.jpg +17159.jpg +10658.jpg +19527.jpg +6179.jpg +24987.jpg +6736.jpg +3.jpg +24387.jpg +24189.jpg +7214.jpg +20632.jpg +6653.jpg +4608.jpg +2088.jpg +10618.jpg +18961.jpg +7876.jpg +13401.jpg +11677.jpg +24103.jpg +13457.jpg +1121.jpg +3669.jpg +22833.jpg +7073.jpg +18837.jpg +21820.jpg +19507.jpg +10341.jpg +19673.jpg +8261.jpg +18542.jpg +14150.jpg +5818.jpg +23309.jpg +341.jpg +27085.jpg +22268.jpg +20521.jpg +14535.jpg +2498.jpg +22479.jpg +27843.jpg +25580.jpg +687.jpg +832.jpg +12554.jpg +9873.jpg +29770.jpg +14581.jpg +19080.jpg +6134.jpg +331.jpg +23878.jpg +11246.jpg +19907.jpg +10099.jpg +17876.jpg +9072.jpg +6092.jpg +19863.jpg +872.jpg +3121.jpg +14393.jpg +20097.jpg +3259.jpg +3237.jpg +2355.jpg +3622.jpg +12734.jpg +11409.jpg +10302.jpg +3229.jpg +63.jpg +12324.jpg +25244.jpg +12575.jpg +26943.jpg +24997.jpg +8024.jpg +17705.jpg +14671.jpg +12502.jpg +25570.jpg +1987.jpg +1385.jpg +19783.jpg +14437.jpg +2499.jpg +28836.jpg +13523.jpg +25636.jpg +12699.jpg +7206.jpg +27192.jpg +11611.jpg +1040.jpg +11998.jpg +23721.jpg +18588.jpg +3330.jpg +12341.jpg +9118.jpg +22522.jpg +25732.jpg +3340.jpg +14554.jpg +3742.jpg +12443.jpg +24163.jpg +2622.jpg +27797.jpg +20124.jpg +22300.jpg +20155.jpg +20648.jpg +7735.jpg +16223.jpg +5044.jpg +6249.jpg +27834.jpg +7251.jpg +18771.jpg +12343.jpg +7442.jpg +3735.jpg +10694.jpg +22211.jpg +15602.jpg +21772.jpg +16563.jpg +10567.jpg +29289.jpg +9983.jpg +21308.jpg +12124.jpg +8197.jpg +16335.jpg +20619.jpg +10767.jpg +14133.jpg +11794.jpg +9984.jpg +17735.jpg +25138.jpg +7776.jpg +28028.jpg +10768.jpg +8992.jpg +14544.jpg +18045.jpg +20167.jpg +13728.jpg +5227.jpg +5006.jpg +15391.jpg +6793.jpg +20940.jpg +24141.jpg +25014.jpg +11551.jpg +23572.jpg +14973.jpg +4162.jpg +25102.jpg +29730.jpg +14931.jpg +19333.jpg +21422.jpg +1290.jpg +23804.jpg +3732.jpg +17702.jpg +7330.jpg +545.jpg +18679.jpg +6836.jpg +15023.jpg +4171.jpg +12940.jpg +21823.jpg +24522.jpg +17237.jpg +27680.jpg +23016.jpg +26069.jpg +18358.jpg +25893.jpg +4193.jpg +26459.jpg +3370.jpg +27510.jpg +27786.jpg +27771.jpg +24366.jpg +25393.jpg +9795.jpg +13463.jpg +23051.jpg +21119.jpg +5453.jpg +1933.jpg +15394.jpg +4175.jpg +18175.jpg +23708.jpg +19605.jpg +23671.jpg +29126.jpg +17697.jpg +22261.jpg +8826.jpg +20991.jpg +17833.jpg +11943.jpg +4964.jpg +28381.jpg +17689.jpg +4994.jpg +19267.jpg +10238.jpg +15538.jpg +18379.jpg +15743.jpg +7366.jpg +28333.jpg +25791.jpg +2858.jpg +19318.jpg +8965.jpg +28751.jpg +3472.jpg +17878.jpg +17220.jpg +4652.jpg +23801.jpg +6264.jpg +14377.jpg +26049.jpg +1817.jpg +5580.jpg +9356.jpg +12994.jpg +10009.jpg +23865.jpg +2453.jpg +29593.jpg +19641.jpg +6621.jpg +15899.jpg +21662.jpg +25355.jpg +16880.jpg +15992.jpg +19101.jpg +28140.jpg +2727.jpg +22287.jpg +13690.jpg +25807.jpg 
+18493.jpg +25691.jpg +28679.jpg +15270.jpg +2603.jpg +23324.jpg +12091.jpg +4034.jpg +25067.jpg +19066.jpg +7036.jpg +6722.jpg +10612.jpg +7368.jpg +24950.jpg +13194.jpg +10834.jpg +9748.jpg +18895.jpg +28431.jpg +5652.jpg +7151.jpg +25289.jpg +9642.jpg +29864.jpg +16351.jpg +10777.jpg +8663.jpg +6533.jpg +28536.jpg +14948.jpg +6673.jpg +4679.jpg +8961.jpg +24622.jpg +25145.jpg +14480.jpg +26426.jpg +11825.jpg +27299.jpg +28307.jpg +6530.jpg +24493.jpg +14983.jpg +21549.jpg +13086.jpg +16347.jpg +21002.jpg +21850.jpg +6025.jpg +4206.jpg +6876.jpg +13809.jpg +16554.jpg +12753.jpg +20953.jpg +20213.jpg +15422.jpg +5342.jpg +328.jpg +22632.jpg +20130.jpg +23462.jpg +20121.jpg +25390.jpg +1827.jpg +4997.jpg +8779.jpg +15753.jpg +13913.jpg +263.jpg +23612.jpg +5432.jpg +7332.jpg +22562.jpg +17960.jpg +1410.jpg +21093.jpg +11513.jpg +23756.jpg +26507.jpg +17776.jpg +704.jpg +1633.jpg +6963.jpg +21521.jpg +19920.jpg +4282.jpg +3787.jpg +16825.jpg +1068.jpg +28077.jpg +24105.jpg +11740.jpg +24578.jpg +2893.jpg +23382.jpg +29977.jpg +22768.jpg +24354.jpg +227.jpg +24649.jpg +28917.jpg +8534.jpg +14322.jpg +8363.jpg +11286.jpg +20410.jpg +6734.jpg +7544.jpg +6514.jpg +17859.jpg +2157.jpg +19198.jpg +8132.jpg +346.jpg +7660.jpg +28081.jpg +12740.jpg +165.jpg +21563.jpg +20834.jpg +26469.jpg +10472.jpg +28645.jpg +16541.jpg +17073.jpg +6420.jpg +24825.jpg +14662.jpg +13151.jpg +7252.jpg +18224.jpg +10579.jpg +20185.jpg +26980.jpg +18996.jpg +508.jpg +16506.jpg +23453.jpg +14411.jpg +13823.jpg +2083.jpg +5425.jpg +28706.jpg +9768.jpg +29512.jpg +16246.jpg +4802.jpg +14645.jpg +13029.jpg +25051.jpg +1202.jpg +19025.jpg +29016.jpg +21852.jpg +20169.jpg +21716.jpg +25032.jpg +19631.jpg +700.jpg +21412.jpg +15014.jpg +26750.jpg +22351.jpg +18199.jpg +9603.jpg +14357.jpg +7991.jpg +28780.jpg +4925.jpg +26907.jpg +10771.jpg +6522.jpg +10240.jpg +16371.jpg +22609.jpg +23504.jpg +5576.jpg +10090.jpg +8630.jpg +9205.jpg +17342.jpg +9599.jpg +22773.jpg +14294.jpg +19149.jpg +20804.jpg +23575.jpg +13047.jpg +14710.jpg +20236.jpg +10167.jpg +15764.jpg +19045.jpg +19511.jpg +27796.jpg +5746.jpg +10149.jpg +2329.jpg +6705.jpg +12621.jpg +20231.jpg +4384.jpg +22507.jpg +19345.jpg +26144.jpg +6598.jpg +6702.jpg +5541.jpg +204.jpg +23256.jpg +28944.jpg +9486.jpg +24797.jpg +19504.jpg +19558.jpg +12910.jpg +27799.jpg +986.jpg +17896.jpg +1128.jpg +25017.jpg +11654.jpg +25740.jpg +28556.jpg +5788.jpg +8366.jpg +5752.jpg +9892.jpg +26155.jpg +23735.jpg +17276.jpg +8601.jpg +13906.jpg +19926.jpg +15050.jpg +27689.jpg +14171.jpg +5768.jpg +3308.jpg +117.jpg +28560.jpg +9479.jpg +19412.jpg +1413.jpg +11313.jpg +29361.jpg +23002.jpg +6520.jpg +2823.jpg +8966.jpg +23506.jpg +4352.jpg +28242.jpg +1345.jpg +16743.jpg +27298.jpg +26888.jpg +16662.jpg +17427.jpg +12777.jpg +17337.jpg +12168.jpg +7900.jpg +1323.jpg +18538.jpg +28063.jpg +29100.jpg +29304.jpg +28583.jpg +3808.jpg +25497.jpg +19116.jpg +7858.jpg +24939.jpg +7131.jpg +13023.jpg +21455.jpg +19952.jpg +28158.jpg +29155.jpg +15455.jpg +16202.jpg +27536.jpg +24323.jpg +19924.jpg +13786.jpg +5247.jpg +15668.jpg +22887.jpg +2275.jpg +22177.jpg +15411.jpg +25648.jpg +19551.jpg +7850.jpg +15291.jpg +3009.jpg +17582.jpg +17413.jpg +8116.jpg +20536.jpg +13220.jpg +1671.jpg +6352.jpg +675.jpg +13449.jpg +3469.jpg +9387.jpg +18641.jpg +18932.jpg +12659.jpg +21741.jpg +23912.jpg +11715.jpg +21996.jpg +11481.jpg +16525.jpg +7875.jpg +24821.jpg +21994.jpg +28855.jpg +6513.jpg +20610.jpg +17807.jpg +11846.jpg +8157.jpg +18711.jpg +2874.jpg +21744.jpg +29599.jpg +9549.jpg +6525.jpg 
+7064.jpg +8218.jpg +19685.jpg +16264.jpg +2722.jpg +5275.jpg +15415.jpg +25333.jpg +241.jpg +2018.jpg +10269.jpg +2241.jpg +12319.jpg +12949.jpg +26859.jpg +14036.jpg +17753.jpg +26455.jpg +11448.jpg +25953.jpg +27838.jpg +26829.jpg +998.jpg +9076.jpg +7786.jpg +26655.jpg +242.jpg +585.jpg +3222.jpg +14762.jpg +12744.jpg +6527.jpg +12275.jpg +5299.jpg +17256.jpg +18680.jpg +19911.jpg +25694.jpg +13936.jpg +6155.jpg +9703.jpg +26671.jpg +25186.jpg +24699.jpg +17801.jpg +13124.jpg +14626.jpg +4076.jpg +29856.jpg +811.jpg +27975.jpg +17065.jpg +12938.jpg +13301.jpg +29473.jpg +13810.jpg +7802.jpg +378.jpg +7397.jpg +24881.jpg +15169.jpg +26739.jpg +23469.jpg +13734.jpg +9220.jpg +4002.jpg +5406.jpg +18690.jpg +11546.jpg +3123.jpg +12820.jpg +27842.jpg +20412.jpg +4648.jpg +17371.jpg +1661.jpg +3490.jpg +9906.jpg +20839.jpg +13071.jpg +22048.jpg +5423.jpg +22566.jpg +29576.jpg +25210.jpg +23223.jpg +22961.jpg +12865.jpg +24949.jpg +6997.jpg +9696.jpg +22424.jpg +14269.jpg +6874.jpg +1536.jpg +15888.jpg +23093.jpg +2678.jpg +17646.jpg +7777.jpg +21214.jpg +7774.jpg +26418.jpg +28015.jpg +20166.jpg +3825.jpg +24201.jpg +8317.jpg +14778.jpg +27354.jpg +12297.jpg +24751.jpg +22045.jpg +5715.jpg +16927.jpg +3904.jpg +22210.jpg +19164.jpg +16728.jpg +22001.jpg +29740.jpg +12380.jpg +22747.jpg +5195.jpg +20352.jpg +2816.jpg +5684.jpg +7932.jpg +29597.jpg +765.jpg +25263.jpg +26924.jpg +186.jpg +29633.jpg +1240.jpg +9237.jpg +25910.jpg +29842.jpg +28285.jpg +29933.jpg +20746.jpg +6882.jpg +19849.jpg +501.jpg +10624.jpg +10257.jpg +27767.jpg +9194.jpg +12635.jpg +10163.jpg +26083.jpg +14443.jpg +9585.jpg +4122.jpg +22546.jpg +29826.jpg +23702.jpg +8328.jpg +15442.jpg +13429.jpg +3246.jpg +11863.jpg +15700.jpg +5302.jpg +16824.jpg +13608.jpg +12499.jpg +12730.jpg +4290.jpg +2139.jpg +12029.jpg +29257.jpg +18063.jpg +20935.jpg +27222.jpg +18024.jpg +17092.jpg +19108.jpg +2908.jpg +22260.jpg +2070.jpg +16758.jpg +26794.jpg +4834.jpg +23293.jpg +5957.jpg +2793.jpg +15851.jpg +21315.jpg +16009.jpg +1251.jpg +10388.jpg +2466.jpg +10638.jpg +25034.jpg +15151.jpg +13741.jpg +27270.jpg +4833.jpg +6023.jpg +28972.jpg +7260.jpg +17444.jpg +15699.jpg +23730.jpg +20254.jpg +17959.jpg +21653.jpg +28331.jpg +10644.jpg +23935.jpg +4600.jpg +2720.jpg +6569.jpg +2528.jpg diff --git a/lama/models/ade20k/__init__.py b/lama/models/ade20k/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..773cfc4664eef45a4f6fe05bd3fe2aa2143fdb5c --- /dev/null +++ b/lama/models/ade20k/__init__.py @@ -0,0 +1 @@ +from .base import * \ No newline at end of file diff --git a/lama/models/ade20k/base.py b/lama/models/ade20k/base.py new file mode 100644 index 0000000000000000000000000000000000000000..8cdbe2d3e7dbadf4ed5e5a7cf2d248761ef25d9c --- /dev/null +++ b/lama/models/ade20k/base.py @@ -0,0 +1,627 @@ +"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch""" + +import os + +import pandas as pd +import torch +import torch.nn as nn +import torch.nn.functional as F +from scipy.io import loadmat +from torch.nn.modules import BatchNorm2d + +from . import resnet +from . 
import mobilenet + + +NUM_CLASS = 150 +base_path = os.path.dirname(os.path.abspath(__file__)) # current file path +colors_path = os.path.join(base_path, 'color150.mat') +classes_path = os.path.join(base_path, 'object150_info.csv') + +segm_options = dict(colors=loadmat(colors_path)['colors'], + classes=pd.read_csv(classes_path),) + + +class NormalizeTensor: + def __init__(self, mean, std, inplace=False): + """Normalize a tensor image with mean and standard deviation. + .. note:: + This transform acts out of place by default, i.e., it does not mutates the input tensor. + See :class:`~torchvision.transforms.Normalize` for more details. + Args: + tensor (Tensor): Tensor image of size (C, H, W) to be normalized. + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + inplace(bool,optional): Bool to make this operation inplace. + Returns: + Tensor: Normalized Tensor image. + """ + + self.mean = mean + self.std = std + self.inplace = inplace + + def __call__(self, tensor): + if not self.inplace: + tensor = tensor.clone() + + dtype = tensor.dtype + mean = torch.as_tensor(self.mean, dtype=dtype, device=tensor.device) + std = torch.as_tensor(self.std, dtype=dtype, device=tensor.device) + tensor.sub_(mean[None, :, None, None]).div_(std[None, :, None, None]) + return tensor + + +# Model Builder +class ModelBuilder: + # custom weights initialization + @staticmethod + def weights_init(m): + classname = m.__class__.__name__ + if classname.find('Conv') != -1: + nn.init.kaiming_normal_(m.weight.data) + elif classname.find('BatchNorm') != -1: + m.weight.data.fill_(1.) + m.bias.data.fill_(1e-4) + + @staticmethod + def build_encoder(arch='resnet50dilated', fc_dim=512, weights=''): + pretrained = True if len(weights) == 0 else False + arch = arch.lower() + if arch == 'mobilenetv2dilated': + orig_mobilenet = mobilenet.__dict__['mobilenetv2'](pretrained=pretrained) + net_encoder = MobileNetV2Dilated(orig_mobilenet, dilate_scale=8) + elif arch == 'resnet18': + orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained) + net_encoder = Resnet(orig_resnet) + elif arch == 'resnet18dilated': + orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained) + net_encoder = ResnetDilated(orig_resnet, dilate_scale=8) + elif arch == 'resnet50dilated': + orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained) + net_encoder = ResnetDilated(orig_resnet, dilate_scale=8) + elif arch == 'resnet50': + orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained) + net_encoder = Resnet(orig_resnet) + else: + raise Exception('Architecture undefined!') + + # encoders are usually pretrained + # net_encoder.apply(ModelBuilder.weights_init) + if len(weights) > 0: + print('Loading weights for net_encoder') + net_encoder.load_state_dict( + torch.load(weights, map_location=lambda storage, loc: storage), strict=False) + return net_encoder + + @staticmethod + def build_decoder(arch='ppm_deepsup', + fc_dim=512, num_class=NUM_CLASS, + weights='', use_softmax=False, drop_last_conv=False): + arch = arch.lower() + if arch == 'ppm_deepsup': + net_decoder = PPMDeepsup( + num_class=num_class, + fc_dim=fc_dim, + use_softmax=use_softmax, + drop_last_conv=drop_last_conv) + elif arch == 'c1_deepsup': + net_decoder = C1DeepSup( + num_class=num_class, + fc_dim=fc_dim, + use_softmax=use_softmax, + drop_last_conv=drop_last_conv) + else: + raise Exception('Architecture undefined!') + + net_decoder.apply(ModelBuilder.weights_init) + if len(weights) > 0: + print('Loading 
weights for net_decoder') + net_decoder.load_state_dict( + torch.load(weights, map_location=lambda storage, loc: storage), strict=False) + return net_decoder + + @staticmethod + def get_decoder(weights_path, arch_encoder, arch_decoder, fc_dim, drop_last_conv, *arts, **kwargs): + path = os.path.join(weights_path, 'ade20k', f'ade20k-{arch_encoder}-{arch_decoder}/decoder_epoch_20.pth') + return ModelBuilder.build_decoder(arch=arch_decoder, fc_dim=fc_dim, weights=path, use_softmax=True, drop_last_conv=drop_last_conv) + + @staticmethod + def get_encoder(weights_path, arch_encoder, arch_decoder, fc_dim, segmentation, + *arts, **kwargs): + if segmentation: + path = os.path.join(weights_path, 'ade20k', f'ade20k-{arch_encoder}-{arch_decoder}/encoder_epoch_20.pth') + else: + path = '' + return ModelBuilder.build_encoder(arch=arch_encoder, fc_dim=fc_dim, weights=path) + + +def conv3x3_bn_relu(in_planes, out_planes, stride=1): + return nn.Sequential( + nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False), + BatchNorm2d(out_planes), + nn.ReLU(inplace=True), + ) + + +class SegmentationModule(nn.Module): + def __init__(self, + weights_path, + num_classes=150, + arch_encoder="resnet50dilated", + drop_last_conv=False, + net_enc=None, # None for Default encoder + net_dec=None, # None for Default decoder + encode=None, # {None, 'binary', 'color', 'sky'} + use_default_normalization=False, + return_feature_maps=False, + return_feature_maps_level=3, # {0, 1, 2, 3} + return_feature_maps_only=True, + **kwargs, + ): + super().__init__() + self.weights_path = weights_path + self.drop_last_conv = drop_last_conv + self.arch_encoder = arch_encoder + if self.arch_encoder == "resnet50dilated": + self.arch_decoder = "ppm_deepsup" + self.fc_dim = 2048 + elif self.arch_encoder == "mobilenetv2dilated": + self.arch_decoder = "c1_deepsup" + self.fc_dim = 320 + else: + raise NotImplementedError(f"No such arch_encoder={self.arch_encoder}") + model_builder_kwargs = dict(arch_encoder=self.arch_encoder, + arch_decoder=self.arch_decoder, + fc_dim=self.fc_dim, + drop_last_conv=drop_last_conv, + weights_path=self.weights_path) + + self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + self.encoder = ModelBuilder.get_encoder(**model_builder_kwargs) if net_enc is None else net_enc + self.decoder = ModelBuilder.get_decoder(**model_builder_kwargs) if net_dec is None else net_dec + self.use_default_normalization = use_default_normalization + self.default_normalization = NormalizeTensor(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + + self.encode = encode + + self.return_feature_maps = return_feature_maps + + assert 0 <= return_feature_maps_level <= 3 + self.return_feature_maps_level = return_feature_maps_level + + def normalize_input(self, tensor): + if tensor.min() < 0 or tensor.max() > 1: + raise ValueError("Tensor should be 0..1 before using normalize_input") + return self.default_normalization(tensor) + + @property + def feature_maps_channels(self): + return 256 * 2**(self.return_feature_maps_level) # 256, 512, 1024, 2048 + + def forward(self, img_data, segSize=None): + if segSize is None: + raise NotImplementedError("Please pass segSize param. 
By default: (300, 300)") + + fmaps = self.encoder(img_data, return_feature_maps=True) + pred = self.decoder(fmaps, segSize=segSize) + + if self.return_feature_maps: + return pred, fmaps + # print("BINARY", img_data.shape, pred.shape) + return pred + + def multi_mask_from_multiclass(self, pred, classes): + def isin(ar1, ar2): + return (ar1[..., None] == ar2).any(-1).float() + return isin(pred, torch.LongTensor(classes).to(self.device)) + + @staticmethod + def multi_mask_from_multiclass_probs(scores, classes): + res = None + for c in classes: + if res is None: + res = scores[:, c] + else: + res += scores[:, c] + return res + + def predict(self, tensor, imgSizes=(-1,), # (300, 375, 450, 525, 600) + segSize=None): + """Entry-point for segmentation. Use this methods instead of forward + Arguments: + tensor {torch.Tensor} -- BCHW + Keyword Arguments: + imgSizes {tuple or list} -- imgSizes for segmentation input. + default: (300, 450) + original implementation: (300, 375, 450, 525, 600) + + """ + if segSize is None: + segSize = tensor.shape[-2:] + segSize = (tensor.shape[2], tensor.shape[3]) + with torch.no_grad(): + if self.use_default_normalization: + tensor = self.normalize_input(tensor) + scores = torch.zeros(1, NUM_CLASS, segSize[0], segSize[1]).to(self.device) + features = torch.zeros(1, self.feature_maps_channels, segSize[0], segSize[1]).to(self.device) + + result = [] + for img_size in imgSizes: + if img_size != -1: + img_data = F.interpolate(tensor.clone(), size=img_size) + else: + img_data = tensor.clone() + + if self.return_feature_maps: + pred_current, fmaps = self.forward(img_data, segSize=segSize) + else: + pred_current = self.forward(img_data, segSize=segSize) + + + result.append(pred_current) + scores = scores + pred_current / len(imgSizes) + + # Disclaimer: We use and aggregate only last fmaps: fmaps[3] + if self.return_feature_maps: + features = features + F.interpolate(fmaps[self.return_feature_maps_level], size=segSize) / len(imgSizes) + + _, pred = torch.max(scores, dim=1) + + if self.return_feature_maps: + return features + + return pred, result + + def get_edges(self, t): + edge = torch.cuda.ByteTensor(t.size()).zero_() + edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1]) + edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1]) + edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :]) + edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :]) + + if True: + return edge.half() + return edge.float() + + +# pyramid pooling, deep supervision +class PPMDeepsup(nn.Module): + def __init__(self, num_class=NUM_CLASS, fc_dim=4096, + use_softmax=False, pool_scales=(1, 2, 3, 6), + drop_last_conv=False): + super().__init__() + self.use_softmax = use_softmax + self.drop_last_conv = drop_last_conv + + self.ppm = [] + for scale in pool_scales: + self.ppm.append(nn.Sequential( + nn.AdaptiveAvgPool2d(scale), + nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), + BatchNorm2d(512), + nn.ReLU(inplace=True) + )) + self.ppm = nn.ModuleList(self.ppm) + self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1) + + self.conv_last = nn.Sequential( + nn.Conv2d(fc_dim + len(pool_scales) * 512, 512, + kernel_size=3, padding=1, bias=False), + BatchNorm2d(512), + nn.ReLU(inplace=True), + nn.Dropout2d(0.1), + nn.Conv2d(512, num_class, kernel_size=1) + ) + self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) + self.dropout_deepsup = nn.Dropout2d(0.1) + + def forward(self, conv_out, segSize=None): + 
conv5 = conv_out[-1] + + input_size = conv5.size() + ppm_out = [conv5] + for pool_scale in self.ppm: + ppm_out.append(nn.functional.interpolate( + pool_scale(conv5), + (input_size[2], input_size[3]), + mode='bilinear', align_corners=False)) + ppm_out = torch.cat(ppm_out, 1) + + if self.drop_last_conv: + return ppm_out + else: + x = self.conv_last(ppm_out) + + if self.use_softmax: # is True during inference + x = nn.functional.interpolate( + x, size=segSize, mode='bilinear', align_corners=False) + x = nn.functional.softmax(x, dim=1) + return x + + # deep sup + conv4 = conv_out[-2] + _ = self.cbr_deepsup(conv4) + _ = self.dropout_deepsup(_) + _ = self.conv_last_deepsup(_) + + x = nn.functional.log_softmax(x, dim=1) + _ = nn.functional.log_softmax(_, dim=1) + + return (x, _) + + +class Resnet(nn.Module): + def __init__(self, orig_resnet): + super(Resnet, self).__init__() + + # take pretrained resnet, except AvgPool and FC + self.conv1 = orig_resnet.conv1 + self.bn1 = orig_resnet.bn1 + self.relu1 = orig_resnet.relu1 + self.conv2 = orig_resnet.conv2 + self.bn2 = orig_resnet.bn2 + self.relu2 = orig_resnet.relu2 + self.conv3 = orig_resnet.conv3 + self.bn3 = orig_resnet.bn3 + self.relu3 = orig_resnet.relu3 + self.maxpool = orig_resnet.maxpool + self.layer1 = orig_resnet.layer1 + self.layer2 = orig_resnet.layer2 + self.layer3 = orig_resnet.layer3 + self.layer4 = orig_resnet.layer4 + + def forward(self, x, return_feature_maps=False): + conv_out = [] + + x = self.relu1(self.bn1(self.conv1(x))) + x = self.relu2(self.bn2(self.conv2(x))) + x = self.relu3(self.bn3(self.conv3(x))) + x = self.maxpool(x) + + x = self.layer1(x); conv_out.append(x); + x = self.layer2(x); conv_out.append(x); + x = self.layer3(x); conv_out.append(x); + x = self.layer4(x); conv_out.append(x); + + if return_feature_maps: + return conv_out + return [x] + +# Resnet Dilated +class ResnetDilated(nn.Module): + def __init__(self, orig_resnet, dilate_scale=8): + super().__init__() + from functools import partial + + if dilate_scale == 8: + orig_resnet.layer3.apply( + partial(self._nostride_dilate, dilate=2)) + orig_resnet.layer4.apply( + partial(self._nostride_dilate, dilate=4)) + elif dilate_scale == 16: + orig_resnet.layer4.apply( + partial(self._nostride_dilate, dilate=2)) + + # take pretrained resnet, except AvgPool and FC + self.conv1 = orig_resnet.conv1 + self.bn1 = orig_resnet.bn1 + self.relu1 = orig_resnet.relu1 + self.conv2 = orig_resnet.conv2 + self.bn2 = orig_resnet.bn2 + self.relu2 = orig_resnet.relu2 + self.conv3 = orig_resnet.conv3 + self.bn3 = orig_resnet.bn3 + self.relu3 = orig_resnet.relu3 + self.maxpool = orig_resnet.maxpool + self.layer1 = orig_resnet.layer1 + self.layer2 = orig_resnet.layer2 + self.layer3 = orig_resnet.layer3 + self.layer4 = orig_resnet.layer4 + + def _nostride_dilate(self, m, dilate): + classname = m.__class__.__name__ + if classname.find('Conv') != -1: + # the convolution with stride + if m.stride == (2, 2): + m.stride = (1, 1) + if m.kernel_size == (3, 3): + m.dilation = (dilate // 2, dilate // 2) + m.padding = (dilate // 2, dilate // 2) + # other convoluions + else: + if m.kernel_size == (3, 3): + m.dilation = (dilate, dilate) + m.padding = (dilate, dilate) + + def forward(self, x, return_feature_maps=False): + conv_out = [] + + x = self.relu1(self.bn1(self.conv1(x))) + x = self.relu2(self.bn2(self.conv2(x))) + x = self.relu3(self.bn3(self.conv3(x))) + x = self.maxpool(x) + + x = self.layer1(x) + conv_out.append(x) + x = self.layer2(x) + conv_out.append(x) + x = self.layer3(x) + 
conv_out.append(x) + x = self.layer4(x) + conv_out.append(x) + + if return_feature_maps: + return conv_out + return [x] + +class MobileNetV2Dilated(nn.Module): + def __init__(self, orig_net, dilate_scale=8): + super(MobileNetV2Dilated, self).__init__() + from functools import partial + + # take pretrained mobilenet features + self.features = orig_net.features[:-1] + + self.total_idx = len(self.features) + self.down_idx = [2, 4, 7, 14] + + if dilate_scale == 8: + for i in range(self.down_idx[-2], self.down_idx[-1]): + self.features[i].apply( + partial(self._nostride_dilate, dilate=2) + ) + for i in range(self.down_idx[-1], self.total_idx): + self.features[i].apply( + partial(self._nostride_dilate, dilate=4) + ) + elif dilate_scale == 16: + for i in range(self.down_idx[-1], self.total_idx): + self.features[i].apply( + partial(self._nostride_dilate, dilate=2) + ) + + def _nostride_dilate(self, m, dilate): + classname = m.__class__.__name__ + if classname.find('Conv') != -1: + # the convolution with stride + if m.stride == (2, 2): + m.stride = (1, 1) + if m.kernel_size == (3, 3): + m.dilation = (dilate//2, dilate//2) + m.padding = (dilate//2, dilate//2) + # other convoluions + else: + if m.kernel_size == (3, 3): + m.dilation = (dilate, dilate) + m.padding = (dilate, dilate) + + def forward(self, x, return_feature_maps=False): + if return_feature_maps: + conv_out = [] + for i in range(self.total_idx): + x = self.features[i](x) + if i in self.down_idx: + conv_out.append(x) + conv_out.append(x) + return conv_out + + else: + return [self.features(x)] + + +# last conv, deep supervision +class C1DeepSup(nn.Module): + def __init__(self, num_class=150, fc_dim=2048, use_softmax=False, drop_last_conv=False): + super(C1DeepSup, self).__init__() + self.use_softmax = use_softmax + self.drop_last_conv = drop_last_conv + + self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1) + self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1) + + # last conv + self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) + self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) + + def forward(self, conv_out, segSize=None): + conv5 = conv_out[-1] + + x = self.cbr(conv5) + + if self.drop_last_conv: + return x + else: + x = self.conv_last(x) + + if self.use_softmax: # is True during inference + x = nn.functional.interpolate( + x, size=segSize, mode='bilinear', align_corners=False) + x = nn.functional.softmax(x, dim=1) + return x + + # deep sup + conv4 = conv_out[-2] + _ = self.cbr_deepsup(conv4) + _ = self.conv_last_deepsup(_) + + x = nn.functional.log_softmax(x, dim=1) + _ = nn.functional.log_softmax(_, dim=1) + + return (x, _) + + +# last conv +class C1(nn.Module): + def __init__(self, num_class=150, fc_dim=2048, use_softmax=False): + super(C1, self).__init__() + self.use_softmax = use_softmax + + self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1) + + # last conv + self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0) + + def forward(self, conv_out, segSize=None): + conv5 = conv_out[-1] + x = self.cbr(conv5) + x = self.conv_last(x) + + if self.use_softmax: # is True during inference + x = nn.functional.interpolate( + x, size=segSize, mode='bilinear', align_corners=False) + x = nn.functional.softmax(x, dim=1) + else: + x = nn.functional.log_softmax(x, dim=1) + + return x + + +# pyramid pooling +class PPM(nn.Module): + def __init__(self, num_class=150, fc_dim=4096, + use_softmax=False, pool_scales=(1, 2, 3, 6)): + super(PPM, self).__init__() + self.use_softmax = use_softmax + + self.ppm = [] + for 
scale in pool_scales: + self.ppm.append(nn.Sequential( + nn.AdaptiveAvgPool2d(scale), + nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False), + BatchNorm2d(512), + nn.ReLU(inplace=True) + )) + self.ppm = nn.ModuleList(self.ppm) + + self.conv_last = nn.Sequential( + nn.Conv2d(fc_dim+len(pool_scales)*512, 512, + kernel_size=3, padding=1, bias=False), + BatchNorm2d(512), + nn.ReLU(inplace=True), + nn.Dropout2d(0.1), + nn.Conv2d(512, num_class, kernel_size=1) + ) + + def forward(self, conv_out, segSize=None): + conv5 = conv_out[-1] + + input_size = conv5.size() + ppm_out = [conv5] + for pool_scale in self.ppm: + ppm_out.append(nn.functional.interpolate( + pool_scale(conv5), + (input_size[2], input_size[3]), + mode='bilinear', align_corners=False)) + ppm_out = torch.cat(ppm_out, 1) + + x = self.conv_last(ppm_out) + + if self.use_softmax: # is True during inference + x = nn.functional.interpolate( + x, size=segSize, mode='bilinear', align_corners=False) + x = nn.functional.softmax(x, dim=1) + else: + x = nn.functional.log_softmax(x, dim=1) + return x diff --git a/lama/models/ade20k/color150.mat b/lama/models/ade20k/color150.mat new file mode 100644 index 0000000000000000000000000000000000000000..c518b64fbbe899d4a8b2705f012eeba795339892 Binary files /dev/null and b/lama/models/ade20k/color150.mat differ diff --git a/lama/models/ade20k/mobilenet.py b/lama/models/ade20k/mobilenet.py new file mode 100644 index 0000000000000000000000000000000000000000..f501266e56ee71cdf455744020f8fc1a58ec9fff --- /dev/null +++ b/lama/models/ade20k/mobilenet.py @@ -0,0 +1,154 @@ +""" +This MobileNetV2 implementation is modified from the following repository: +https://github.com/tonylins/pytorch-mobilenet-v2 +""" + +import torch.nn as nn +import math +from .utils import load_url +from .segm_lib.nn import SynchronizedBatchNorm2d + +BatchNorm2d = SynchronizedBatchNorm2d + + +__all__ = ['mobilenetv2'] + + +model_urls = { + 'mobilenetv2': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/mobilenet_v2.pth.tar', +} + + +def conv_bn(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + BatchNorm2d(oup), + nn.ReLU6(inplace=True) + ) + + +def conv_1x1_bn(inp, oup): + return nn.Sequential( + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + BatchNorm2d(oup), + nn.ReLU6(inplace=True) + ) + + +class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, expand_ratio): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + hidden_dim = round(inp * expand_ratio) + self.use_res_connect = self.stride == 1 and inp == oup + + if expand_ratio == 1: + self.conv = nn.Sequential( + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + BatchNorm2d(oup), + ) + else: + self.conv = nn.Sequential( + # pw + nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), + BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # dw + nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), + BatchNorm2d(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + BatchNorm2d(oup), + ) + + def forward(self, x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +class MobileNetV2(nn.Module): + def __init__(self, n_class=1000, input_size=224, width_mult=1.): + super(MobileNetV2, self).__init__() + block = InvertedResidual 
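+        # Each row of interverted_residual_setting below is (t, c, n, s):
+        # expansion factor, output channels, number of repeated blocks, and
+        # stride of the first block in the group, which is how the
+        # construction loop below consumes it.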
+ input_channel = 32 + last_channel = 1280 + interverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + [6, 96, 3, 1], + [6, 160, 3, 2], + [6, 320, 1, 1], + ] + + # building first layer + assert input_size % 32 == 0 + input_channel = int(input_channel * width_mult) + self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel + self.features = [conv_bn(3, input_channel, 2)] + # building inverted residual blocks + for t, c, n, s in interverted_residual_setting: + output_channel = int(c * width_mult) + for i in range(n): + if i == 0: + self.features.append(block(input_channel, output_channel, s, expand_ratio=t)) + else: + self.features.append(block(input_channel, output_channel, 1, expand_ratio=t)) + input_channel = output_channel + # building last several layers + self.features.append(conv_1x1_bn(input_channel, self.last_channel)) + # make it nn.Sequential + self.features = nn.Sequential(*self.features) + + # building classifier + self.classifier = nn.Sequential( + nn.Dropout(0.2), + nn.Linear(self.last_channel, n_class), + ) + + self._initialize_weights() + + def forward(self, x): + x = self.features(x) + x = x.mean(3).mean(2) + x = self.classifier(x) + return x + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + n = m.weight.size(1) + m.weight.data.normal_(0, 0.01) + m.bias.data.zero_() + + +def mobilenetv2(pretrained=False, **kwargs): + """Constructs a MobileNet_V2 model. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = MobileNetV2(n_class=1000, **kwargs) + if pretrained: + model.load_state_dict(load_url(model_urls['mobilenetv2']), strict=False) + return model \ No newline at end of file diff --git a/lama/models/ade20k/object150_info.csv b/lama/models/ade20k/object150_info.csv new file mode 100644 index 0000000000000000000000000000000000000000..8b34d8f3874a38b96894863c5458a7c3c2b0e2e6 --- /dev/null +++ b/lama/models/ade20k/object150_info.csv @@ -0,0 +1,151 @@ +Idx,Ratio,Train,Val,Stuff,Name +1,0.1576,11664,1172,1,wall +2,0.1072,6046,612,1,building;edifice +3,0.0878,8265,796,1,sky +4,0.0621,9336,917,1,floor;flooring +5,0.0480,6678,641,0,tree +6,0.0450,6604,643,1,ceiling +7,0.0398,4023,408,1,road;route +8,0.0231,1906,199,0,bed +9,0.0198,4688,460,0,windowpane;window +10,0.0183,2423,225,1,grass +11,0.0181,2874,294,0,cabinet +12,0.0166,3068,310,1,sidewalk;pavement +13,0.0160,5075,526,0,person;individual;someone;somebody;mortal;soul +14,0.0151,1804,190,1,earth;ground +15,0.0118,6666,796,0,door;double;door +16,0.0110,4269,411,0,table +17,0.0109,1691,160,1,mountain;mount +18,0.0104,3999,441,0,plant;flora;plant;life +19,0.0104,2149,217,0,curtain;drape;drapery;mantle;pall +20,0.0103,3261,318,0,chair +21,0.0098,3164,306,0,car;auto;automobile;machine;motorcar +22,0.0074,709,75,1,water +23,0.0067,3296,315,0,painting;picture +24,0.0065,1191,106,0,sofa;couch;lounge +25,0.0061,1516,162,0,shelf +26,0.0060,667,69,1,house +27,0.0053,651,57,1,sea +28,0.0052,1847,224,0,mirror +29,0.0046,1158,128,1,rug;carpet;carpeting +30,0.0044,480,44,1,field +31,0.0044,1172,98,0,armchair +32,0.0044,1292,184,0,seat +33,0.0033,1386,138,0,fence;fencing +34,0.0031,698,61,0,desk +35,0.0030,781,73,0,rock;stone +36,0.0027,380,43,0,wardrobe;closet;press +37,0.0026,3089,302,0,lamp +38,0.0024,404,37,0,bathtub;bathing;tub;bath;tub +39,0.0024,804,99,0,railing;rail +40,0.0023,1453,153,0,cushion +41,0.0023,411,37,0,base;pedestal;stand +42,0.0022,1440,162,0,box +43,0.0022,800,77,0,column;pillar +44,0.0020,2650,298,0,signboard;sign +45,0.0019,549,46,0,chest;of;drawers;chest;bureau;dresser +46,0.0019,367,36,0,counter +47,0.0018,311,30,1,sand +48,0.0018,1181,122,0,sink +49,0.0018,287,23,1,skyscraper +50,0.0018,468,38,0,fireplace;hearth;open;fireplace +51,0.0018,402,43,0,refrigerator;icebox +52,0.0018,130,12,1,grandstand;covered;stand +53,0.0018,561,64,1,path +54,0.0017,880,102,0,stairs;steps +55,0.0017,86,12,1,runway +56,0.0017,172,11,0,case;display;case;showcase;vitrine +57,0.0017,198,18,0,pool;table;billiard;table;snooker;table +58,0.0017,930,109,0,pillow +59,0.0015,139,18,0,screen;door;screen +60,0.0015,564,52,1,stairway;staircase +61,0.0015,320,26,1,river +62,0.0015,261,29,1,bridge;span +63,0.0014,275,22,0,bookcase +64,0.0014,335,60,0,blind;screen +65,0.0014,792,75,0,coffee;table;cocktail;table +66,0.0014,395,49,0,toilet;can;commode;crapper;pot;potty;stool;throne +67,0.0014,1309,138,0,flower +68,0.0013,1112,113,0,book +69,0.0013,266,27,1,hill +70,0.0013,659,66,0,bench +71,0.0012,331,31,0,countertop +72,0.0012,531,56,0,stove;kitchen;stove;range;kitchen;range;cooking;stove +73,0.0012,369,36,0,palm;palm;tree +74,0.0012,144,9,0,kitchen;island +75,0.0011,265,29,0,computer;computing;machine;computing;device;data;processor;electronic;computer;information;processing;system +76,0.0010,324,33,0,swivel;chair +77,0.0009,304,27,0,boat +78,0.0009,170,20,0,bar +79,0.0009,68,6,0,arcade;machine +80,0.0009,65,8,1,hovel;hut;hutch;shack;shanty 
+81,0.0009,248,25,0,bus;autobus;coach;charabanc;double-decker;jitney;motorbus;motorcoach;omnibus;passenger;vehicle +82,0.0008,492,49,0,towel +83,0.0008,2510,269,0,light;light;source +84,0.0008,440,39,0,truck;motortruck +85,0.0008,147,18,1,tower +86,0.0008,583,56,0,chandelier;pendant;pendent +87,0.0007,533,61,0,awning;sunshade;sunblind +88,0.0007,1989,239,0,streetlight;street;lamp +89,0.0007,71,5,0,booth;cubicle;stall;kiosk +90,0.0007,618,53,0,television;television;receiver;television;set;tv;tv;set;idiot;box;boob;tube;telly;goggle;box +91,0.0007,135,12,0,airplane;aeroplane;plane +92,0.0007,83,5,1,dirt;track +93,0.0007,178,17,0,apparel;wearing;apparel;dress;clothes +94,0.0006,1003,104,0,pole +95,0.0006,182,12,1,land;ground;soil +96,0.0006,452,50,0,bannister;banister;balustrade;balusters;handrail +97,0.0006,42,6,1,escalator;moving;staircase;moving;stairway +98,0.0006,307,31,0,ottoman;pouf;pouffe;puff;hassock +99,0.0006,965,114,0,bottle +100,0.0006,117,13,0,buffet;counter;sideboard +101,0.0006,354,35,0,poster;posting;placard;notice;bill;card +102,0.0006,108,9,1,stage +103,0.0006,557,55,0,van +104,0.0006,52,4,0,ship +105,0.0005,99,5,0,fountain +106,0.0005,57,4,1,conveyer;belt;conveyor;belt;conveyer;conveyor;transporter +107,0.0005,292,31,0,canopy +108,0.0005,77,9,0,washer;automatic;washer;washing;machine +109,0.0005,340,38,0,plaything;toy +110,0.0005,66,3,1,swimming;pool;swimming;bath;natatorium +111,0.0005,465,49,0,stool +112,0.0005,50,4,0,barrel;cask +113,0.0005,622,75,0,basket;handbasket +114,0.0005,80,9,1,waterfall;falls +115,0.0005,59,3,0,tent;collapsible;shelter +116,0.0005,531,72,0,bag +117,0.0005,282,30,0,minibike;motorbike +118,0.0005,73,7,0,cradle +119,0.0005,435,44,0,oven +120,0.0005,136,25,0,ball +121,0.0005,116,24,0,food;solid;food +122,0.0004,266,31,0,step;stair +123,0.0004,58,12,0,tank;storage;tank +124,0.0004,418,83,0,trade;name;brand;name;brand;marque +125,0.0004,319,43,0,microwave;microwave;oven +126,0.0004,1193,139,0,pot;flowerpot +127,0.0004,97,23,0,animal;animate;being;beast;brute;creature;fauna +128,0.0004,347,36,0,bicycle;bike;wheel;cycle +129,0.0004,52,5,1,lake +130,0.0004,246,22,0,dishwasher;dish;washer;dishwashing;machine +131,0.0004,108,13,0,screen;silver;screen;projection;screen +132,0.0004,201,30,0,blanket;cover +133,0.0004,285,21,0,sculpture +134,0.0004,268,27,0,hood;exhaust;hood +135,0.0003,1020,108,0,sconce +136,0.0003,1282,122,0,vase +137,0.0003,528,65,0,traffic;light;traffic;signal;stoplight +138,0.0003,453,57,0,tray +139,0.0003,671,100,0,ashcan;trash;can;garbage;can;wastebin;ash;bin;ash-bin;ashbin;dustbin;trash;barrel;trash;bin +140,0.0003,397,44,0,fan +141,0.0003,92,8,1,pier;wharf;wharfage;dock +142,0.0003,228,18,0,crt;screen +143,0.0003,570,59,0,plate +144,0.0003,217,22,0,monitor;monitoring;device +145,0.0003,206,19,0,bulletin;board;notice;board +146,0.0003,130,14,0,shower +147,0.0003,178,28,0,radiator +148,0.0002,504,57,0,glass;drinking;glass +149,0.0002,775,96,0,clock +150,0.0002,421,56,0,flag diff --git a/lama/models/ade20k/resnet.py b/lama/models/ade20k/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..3e1d521f171c984cf6a7ff3dcebd96f8c5faf908 --- /dev/null +++ b/lama/models/ade20k/resnet.py @@ -0,0 +1,181 @@ +"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch""" + +import math + +import torch.nn as nn +from torch.nn import BatchNorm2d + +from .utils import load_url + +__all__ = ['ResNet', 'resnet50'] + + +model_urls = { + 'resnet50': 
'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet50-imagenet.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000): + self.inplanes = 128 + super(ResNet, self).__init__() + self.conv1 = conv3x3(3, 64, stride=2) + self.bn1 = BatchNorm2d(64) + self.relu1 = nn.ReLU(inplace=True) + self.conv2 = conv3x3(64, 64) + self.bn2 = BatchNorm2d(64) + self.relu2 = nn.ReLU(inplace=True) + self.conv3 = conv3x3(64, 128) + self.bn3 = BatchNorm2d(128) + self.relu3 = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + self.avgpool = nn.AvgPool2d(7, stride=1) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + elif isinstance(m, BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.relu1(self.bn1(self.conv1(x))) + x = self.relu2(self.bn2(self.conv2(x))) + x = self.relu3(self.bn3(self.conv3(x))) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +def resnet50(pretrained=False, **kwargs): + """Constructs a ResNet-50 model. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) + if pretrained: + model.load_state_dict(load_url(model_urls['resnet50']), strict=False) + return model + + +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + if pretrained: + model.load_state_dict(load_url(model_urls['resnet18'])) + return model \ No newline at end of file diff --git a/lama/models/ade20k/segm_lib/nn/__init__.py b/lama/models/ade20k/segm_lib/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..98a96370ef04570f516052bb73f568d0ebc346c3 --- /dev/null +++ b/lama/models/ade20k/segm_lib/nn/__init__.py @@ -0,0 +1,2 @@ +from .modules import * +from .parallel import UserScatteredDataParallel, user_scattered_collate, async_copy_to diff --git a/lama/models/ade20k/segm_lib/nn/modules/__init__.py b/lama/models/ade20k/segm_lib/nn/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bc8709d92c610b36e0bcbd7da20c1eb41dc8cfcf --- /dev/null +++ b/lama/models/ade20k/segm_lib/nn/modules/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# File : __init__.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. +# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch +# Distributed under MIT License. + +from .batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, SynchronizedBatchNorm3d +from .replicate import DataParallelWithCallback, patch_replication_callback diff --git a/lama/models/ade20k/segm_lib/nn/modules/batchnorm.py b/lama/models/ade20k/segm_lib/nn/modules/batchnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..18318965335b37cc671004a6aceda3229dc7b477 --- /dev/null +++ b/lama/models/ade20k/segm_lib/nn/modules/batchnorm.py @@ -0,0 +1,329 @@ +# -*- coding: utf-8 -*- +# File : batchnorm.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. +# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch +# Distributed under MIT License. 
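+# Usage note: the Synchronized* layers below only aggregate batch statistics
+# across GPUs when the module has been replicated through
+# DataParallelWithCallback (or a DataParallel patched with
+# patch_replication_callback from .replicate); otherwise, and in eval mode,
+# forward() falls back to the standard per-device F.batch_norm path.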
+ +import collections + +import torch +import torch.nn.functional as F + +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast + +from .comm import SyncMaster + +__all__ = ['SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d'] + + +def _sum_ft(tensor): + """sum over the first and last dimention""" + return tensor.sum(dim=0).sum(dim=-1) + + +def _unsqueeze_ft(tensor): + """add new dementions at the front and the tail""" + return tensor.unsqueeze(0).unsqueeze(-1) + + +_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size']) +_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std']) + + +class _SynchronizedBatchNorm(_BatchNorm): + def __init__(self, num_features, eps=1e-5, momentum=0.001, affine=True): + super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine) + + self._sync_master = SyncMaster(self._data_parallel_master) + + self._is_parallel = False + self._parallel_id = None + self._slave_pipe = None + + # customed batch norm statistics + self._moving_average_fraction = 1. - momentum + self.register_buffer('_tmp_running_mean', torch.zeros(self.num_features)) + self.register_buffer('_tmp_running_var', torch.ones(self.num_features)) + self.register_buffer('_running_iter', torch.ones(1)) + self._tmp_running_mean = self.running_mean.clone() * self._running_iter + self._tmp_running_var = self.running_var.clone() * self._running_iter + + def forward(self, input): + # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation. + if not (self._is_parallel and self.training): + return F.batch_norm( + input, self.running_mean, self.running_var, self.weight, self.bias, + self.training, self.momentum, self.eps) + + # Resize the input to (B, C, -1). + input_shape = input.size() + input = input.view(input.size(0), self.num_features, -1) + + # Compute the sum and square-sum. + sum_size = input.size(0) * input.size(2) + input_sum = _sum_ft(input) + input_ssum = _sum_ft(input ** 2) + + # Reduce-and-broadcast the statistics. + if self._parallel_id == 0: + mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size)) + else: + mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size)) + + # Compute the output. + if self.affine: + # MJY:: Fuse the multiplication for speed. + output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias) + else: + output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std) + + # Reshape it. + return output.view(input_shape) + + def __data_parallel_replicate__(self, ctx, copy_id): + self._is_parallel = True + self._parallel_id = copy_id + + # parallel_id == 0 means master device. 
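+        # The master copy (replicated first) stores its SyncMaster on the
+        # shared context; slave copies then register against it and receive
+        # a SlavePipe for exchanging statistics during forward.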
+ if self._parallel_id == 0: + ctx.sync_master = self._sync_master + else: + self._slave_pipe = ctx.sync_master.register_slave(copy_id) + + def _data_parallel_master(self, intermediates): + """Reduce the sum and square-sum, compute the statistics, and broadcast it.""" + intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device()) + + to_reduce = [i[1][:2] for i in intermediates] + to_reduce = [j for i in to_reduce for j in i] # flatten + target_gpus = [i[1].sum.get_device() for i in intermediates] + + sum_size = sum([i[1].sum_size for i in intermediates]) + sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce) + + mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size) + + broadcasted = Broadcast.apply(target_gpus, mean, inv_std) + + outputs = [] + for i, rec in enumerate(intermediates): + outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2]))) + + return outputs + + def _add_weighted(self, dest, delta, alpha=1, beta=1, bias=0): + """return *dest* by `dest := dest*alpha + delta*beta + bias`""" + return dest * alpha + delta * beta + bias + + def _compute_mean_std(self, sum_, ssum, size): + """Compute the mean and standard-deviation with sum and square-sum. This method + also maintains the moving average on the master device.""" + assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.' + mean = sum_ / size + sumvar = ssum - sum_ * mean + unbias_var = sumvar / (size - 1) + bias_var = sumvar / size + + self._tmp_running_mean = self._add_weighted(self._tmp_running_mean, mean.data, alpha=self._moving_average_fraction) + self._tmp_running_var = self._add_weighted(self._tmp_running_var, unbias_var.data, alpha=self._moving_average_fraction) + self._running_iter = self._add_weighted(self._running_iter, 1, alpha=self._moving_average_fraction) + + self.running_mean = self._tmp_running_mean / self._running_iter + self.running_var = self._tmp_running_var / self._running_iter + + return mean, bias_var.clamp(self.eps) ** -0.5 + + +class SynchronizedBatchNorm1d(_SynchronizedBatchNorm): + r"""Applies Synchronized Batch Normalization over a 2d or 3d input that is seen as a + mini-batch. + + .. math:: + + y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta + + This module differs from the built-in PyTorch BatchNorm1d as the mean and + standard-deviation are reduced across all devices during training. + + For example, when one uses `nn.DataParallel` to wrap the network during + training, PyTorch's implementation normalize the tensor on each device using + the statistics only on that device, which accelerated the computation and + is also easy to implement, but the statistics might be inaccurate. + Instead, in this synchronized version, the statistics will be computed + over all training samples distributed on multiple devices. + + Note that, for one-GPU or CPU-only case, this module behaves exactly same + as the built-in PyTorch implementation. + + The mean and standard-deviation are calculated per-dimension over + the mini-batches and gamma and beta are learnable parameter vectors + of size C (where C is the input size). + + During training, this layer keeps a running estimate of its computed mean + and variance. The running sum is kept with a default momentum of 0.1. + + During evaluation, this running mean/variance is used for normalization. 
+ + Because the BatchNorm is done over the `C` dimension, computing statistics + on `(N, L)` slices, it's common terminology to call this Temporal BatchNorm + + Args: + num_features: num_features from an expected input of size + `batch_size x num_features [x width]` + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Default: 0.1 + affine: a boolean value that when set to ``True``, gives the layer learnable + affine parameters. Default: ``True`` + + Shape: + - Input: :math:`(N, C)` or :math:`(N, C, L)` + - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input) + + Examples: + >>> # With Learnable Parameters + >>> m = SynchronizedBatchNorm1d(100) + >>> # Without Learnable Parameters + >>> m = SynchronizedBatchNorm1d(100, affine=False) + >>> input = torch.autograd.Variable(torch.randn(20, 100)) + >>> output = m(input) + """ + + def _check_input_dim(self, input): + if input.dim() != 2 and input.dim() != 3: + raise ValueError('expected 2D or 3D input (got {}D input)' + .format(input.dim())) + super(SynchronizedBatchNorm1d, self)._check_input_dim(input) + + +class SynchronizedBatchNorm2d(_SynchronizedBatchNorm): + r"""Applies Batch Normalization over a 4d input that is seen as a mini-batch + of 3d inputs + + .. math:: + + y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta + + This module differs from the built-in PyTorch BatchNorm2d as the mean and + standard-deviation are reduced across all devices during training. + + For example, when one uses `nn.DataParallel` to wrap the network during + training, PyTorch's implementation normalize the tensor on each device using + the statistics only on that device, which accelerated the computation and + is also easy to implement, but the statistics might be inaccurate. + Instead, in this synchronized version, the statistics will be computed + over all training samples distributed on multiple devices. + + Note that, for one-GPU or CPU-only case, this module behaves exactly same + as the built-in PyTorch implementation. + + The mean and standard-deviation are calculated per-dimension over + the mini-batches and gamma and beta are learnable parameter vectors + of size C (where C is the input size). + + During training, this layer keeps a running estimate of its computed mean + and variance. The running sum is kept with a default momentum of 0.1. + + During evaluation, this running mean/variance is used for normalization. + + Because the BatchNorm is done over the `C` dimension, computing statistics + on `(N, H, W)` slices, it's common terminology to call this Spatial BatchNorm + + Args: + num_features: num_features from an expected input of + size batch_size x num_features x height x width + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Default: 0.1 + affine: a boolean value that when set to ``True``, gives the layer learnable + affine parameters. 
Default: ``True`` + + Shape: + - Input: :math:`(N, C, H, W)` + - Output: :math:`(N, C, H, W)` (same shape as input) + + Examples: + >>> # With Learnable Parameters + >>> m = SynchronizedBatchNorm2d(100) + >>> # Without Learnable Parameters + >>> m = SynchronizedBatchNorm2d(100, affine=False) + >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45)) + >>> output = m(input) + """ + + def _check_input_dim(self, input): + if input.dim() != 4: + raise ValueError('expected 4D input (got {}D input)' + .format(input.dim())) + super(SynchronizedBatchNorm2d, self)._check_input_dim(input) + + +class SynchronizedBatchNorm3d(_SynchronizedBatchNorm): + r"""Applies Batch Normalization over a 5d input that is seen as a mini-batch + of 4d inputs + + .. math:: + + y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta + + This module differs from the built-in PyTorch BatchNorm3d as the mean and + standard-deviation are reduced across all devices during training. + + For example, when one uses `nn.DataParallel` to wrap the network during + training, PyTorch's implementation normalize the tensor on each device using + the statistics only on that device, which accelerated the computation and + is also easy to implement, but the statistics might be inaccurate. + Instead, in this synchronized version, the statistics will be computed + over all training samples distributed on multiple devices. + + Note that, for one-GPU or CPU-only case, this module behaves exactly same + as the built-in PyTorch implementation. + + The mean and standard-deviation are calculated per-dimension over + the mini-batches and gamma and beta are learnable parameter vectors + of size C (where C is the input size). + + During training, this layer keeps a running estimate of its computed mean + and variance. The running sum is kept with a default momentum of 0.1. + + During evaluation, this running mean/variance is used for normalization. + + Because the BatchNorm is done over the `C` dimension, computing statistics + on `(N, D, H, W)` slices, it's common terminology to call this Volumetric BatchNorm + or Spatio-temporal BatchNorm + + Args: + num_features: num_features from an expected input of + size batch_size x num_features x depth x height x width + eps: a value added to the denominator for numerical stability. + Default: 1e-5 + momentum: the value used for the running_mean and running_var + computation. Default: 0.1 + affine: a boolean value that when set to ``True``, gives the layer learnable + affine parameters. 
Default: ``True`` + + Shape: + - Input: :math:`(N, C, D, H, W)` + - Output: :math:`(N, C, D, H, W)` (same shape as input) + + Examples: + >>> # With Learnable Parameters + >>> m = SynchronizedBatchNorm3d(100) + >>> # Without Learnable Parameters + >>> m = SynchronizedBatchNorm3d(100, affine=False) + >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10)) + >>> output = m(input) + """ + + def _check_input_dim(self, input): + if input.dim() != 5: + raise ValueError('expected 5D input (got {}D input)' + .format(input.dim())) + super(SynchronizedBatchNorm3d, self)._check_input_dim(input) diff --git a/lama/models/ade20k/segm_lib/nn/modules/comm.py b/lama/models/ade20k/segm_lib/nn/modules/comm.py new file mode 100644 index 0000000000000000000000000000000000000000..b64bf6ba3b3e7abbab375c6dd4a87d8239e62138 --- /dev/null +++ b/lama/models/ade20k/segm_lib/nn/modules/comm.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# File : comm.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. +# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch +# Distributed under MIT License. + +import queue +import collections +import threading + +__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster'] + + +class FutureResult(object): + """A thread-safe future implementation. Used only as one-to-one pipe.""" + + def __init__(self): + self._result = None + self._lock = threading.Lock() + self._cond = threading.Condition(self._lock) + + def put(self, result): + with self._lock: + assert self._result is None, 'Previous result has\'t been fetched.' + self._result = result + self._cond.notify() + + def get(self): + with self._lock: + if self._result is None: + self._cond.wait() + + res = self._result + self._result = None + return res + + +_MasterRegistry = collections.namedtuple('MasterRegistry', ['result']) +_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result']) + + +class SlavePipe(_SlavePipeBase): + """Pipe for master-slave communication.""" + + def run_slave(self, msg): + self.queue.put((self.identifier, msg)) + ret = self.result.get() + self.queue.put(True) + return ret + + +class SyncMaster(object): + """An abstract `SyncMaster` object. + + - During the replication, as the data parallel will trigger an callback of each module, all slave devices should + call `register(id)` and obtain an `SlavePipe` to communicate with the master. + - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected, + and passed to a registered callback. + - After receiving the messages, the master device should gather the information and determine to message passed + back to each slave devices. + """ + + def __init__(self, master_callback): + """ + + Args: + master_callback: a callback to be invoked after having collected messages from slave devices. + """ + self._master_callback = master_callback + self._queue = queue.Queue() + self._registry = collections.OrderedDict() + self._activated = False + + def register_slave(self, identifier): + """ + Register an slave device. + + Args: + identifier: an identifier, usually is the device id. + + Returns: a `SlavePipe` object which can be used to communicate with the master device. + + """ + if self._activated: + assert self._queue.empty(), 'Queue is not clean before next initialization.' 
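+            # A new replication round is starting: reset the activation flag
+            # and drop slaves registered during the previous round before
+            # handing out fresh pipes.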
+ self._activated = False + self._registry.clear() + future = FutureResult() + self._registry[identifier] = _MasterRegistry(future) + return SlavePipe(identifier, self._queue, future) + + def run_master(self, master_msg): + """ + Main entry for the master device in each forward pass. + The messages were first collected from each devices (including the master device), and then + an callback will be invoked to compute the message to be sent back to each devices + (including the master device). + + Args: + master_msg: the message that the master want to send to itself. This will be placed as the first + message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example. + + Returns: the message to be sent back to the master device. + + """ + self._activated = True + + intermediates = [(0, master_msg)] + for i in range(self.nr_slaves): + intermediates.append(self._queue.get()) + + results = self._master_callback(intermediates) + assert results[0][0] == 0, 'The first result should belongs to the master.' + + for i, res in results: + if i == 0: + continue + self._registry[i].result.put(res) + + for i in range(self.nr_slaves): + assert self._queue.get() is True + + return results[0][1] + + @property + def nr_slaves(self): + return len(self._registry) diff --git a/lama/models/ade20k/segm_lib/nn/modules/replicate.py b/lama/models/ade20k/segm_lib/nn/modules/replicate.py new file mode 100644 index 0000000000000000000000000000000000000000..b71c7b8ed51a1d6c55b1f753bdd8d90bad79bd06 --- /dev/null +++ b/lama/models/ade20k/segm_lib/nn/modules/replicate.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# File : replicate.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. +# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch +# Distributed under MIT License. + +import functools + +from torch.nn.parallel.data_parallel import DataParallel + +__all__ = [ + 'CallbackContext', + 'execute_replication_callbacks', + 'DataParallelWithCallback', + 'patch_replication_callback' +] + + +class CallbackContext(object): + pass + + +def execute_replication_callbacks(modules): + """ + Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. + + The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` + + Note that, as all modules are isomorphism, we assign each sub-module with a context + (shared among multiple copies of this module on different devices). + Through this context, different copies can share some information. + + We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback + of any slave copies. + """ + master_copy = modules[0] + nr_modules = len(list(master_copy.modules())) + ctxs = [CallbackContext() for _ in range(nr_modules)] + + for i, module in enumerate(modules): + for j, m in enumerate(module.modules()): + if hasattr(m, '__data_parallel_replicate__'): + m.__data_parallel_replicate__(ctxs[j], i) + + +class DataParallelWithCallback(DataParallel): + """ + Data Parallel with a replication callback. + + An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by + original `replicate` function. 
+ The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` + + Examples: + > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) + > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) + # sync_bn.__data_parallel_replicate__ will be invoked. + """ + + def replicate(self, module, device_ids): + modules = super(DataParallelWithCallback, self).replicate(module, device_ids) + execute_replication_callbacks(modules) + return modules + + +def patch_replication_callback(data_parallel): + """ + Monkey-patch an existing `DataParallel` object. Add the replication callback. + Useful when you have customized `DataParallel` implementation. + + Examples: + > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) + > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) + > patch_replication_callback(sync_bn) + # this is equivalent to + > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) + > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) + """ + + assert isinstance(data_parallel, DataParallel) + + old_replicate = data_parallel.replicate + + @functools.wraps(old_replicate) + def new_replicate(module, device_ids): + modules = old_replicate(module, device_ids) + execute_replication_callbacks(modules) + return modules + + data_parallel.replicate = new_replicate diff --git a/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py b/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..8bd45a930d3dc84912e58659ee575be08e9038f0 --- /dev/null +++ b/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# File : test_numeric_batchnorm.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. 
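+# This test compares PyTorch's built-in batch norm against a hand-computed
+# reference (handy_var below), for both the normalized output and the
+# running statistics. Note that the sync_batchnorm imports assume the
+# upstream package layout, so running this file from the vendored location
+# may require adjusting the import paths.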
+ +import unittest + +import torch +import torch.nn as nn +from torch.autograd import Variable + +from sync_batchnorm.unittest import TorchTestCase + + +def handy_var(a, unbias=True): + n = a.size(0) + asum = a.sum(dim=0) + as_sum = (a ** 2).sum(dim=0) # a square sum + sumvar = as_sum - asum * asum / n + if unbias: + return sumvar / (n - 1) + else: + return sumvar / n + + +class NumericTestCase(TorchTestCase): + def testNumericBatchNorm(self): + a = torch.rand(16, 10) + bn = nn.BatchNorm2d(10, momentum=1, eps=1e-5, affine=False) + bn.train() + + a_var1 = Variable(a, requires_grad=True) + b_var1 = bn(a_var1) + loss1 = b_var1.sum() + loss1.backward() + + a_var2 = Variable(a, requires_grad=True) + a_mean2 = a_var2.mean(dim=0, keepdim=True) + a_std2 = torch.sqrt(handy_var(a_var2, unbias=False).clamp(min=1e-5)) + # a_std2 = torch.sqrt(a_var2.var(dim=0, keepdim=True, unbiased=False) + 1e-5) + b_var2 = (a_var2 - a_mean2) / a_std2 + loss2 = b_var2.sum() + loss2.backward() + + self.assertTensorClose(bn.running_mean, a.mean(dim=0)) + self.assertTensorClose(bn.running_var, handy_var(a)) + self.assertTensorClose(a_var1.data, a_var2.data) + self.assertTensorClose(b_var1.data, b_var2.data) + self.assertTensorClose(a_var1.grad, a_var2.grad) + + +if __name__ == '__main__': + unittest.main() diff --git a/lama/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py b/lama/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..45bb3c8cfd36d8f668e6fde756b17587eab72082 --- /dev/null +++ b/lama/models/ade20k/segm_lib/nn/modules/tests/test_sync_batchnorm.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +# File : test_sync_batchnorm.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of Synchronized-BatchNorm-PyTorch. 
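+# These tests compare SynchronizedBatchNorm1d/2d against the built-in
+# nn.BatchNorm layers; the Sync* cases wrap the module in
+# DataParallelWithCallback(device_ids=[0, 1]) and call .cuda(), so they
+# need at least two visible CUDA devices. As in test_numeric_batchnorm.py,
+# the sync_batchnorm imports assume the upstream package layout.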
+ +import unittest + +import torch +import torch.nn as nn +from torch.autograd import Variable + +from sync_batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm2d, DataParallelWithCallback +from sync_batchnorm.unittest import TorchTestCase + + +def handy_var(a, unbias=True): + n = a.size(0) + asum = a.sum(dim=0) + as_sum = (a ** 2).sum(dim=0) # a square sum + sumvar = as_sum - asum * asum / n + if unbias: + return sumvar / (n - 1) + else: + return sumvar / n + + +def _find_bn(module): + for m in module.modules(): + if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, SynchronizedBatchNorm1d, SynchronizedBatchNorm2d)): + return m + + +class SyncTestCase(TorchTestCase): + def _syncParameters(self, bn1, bn2): + bn1.reset_parameters() + bn2.reset_parameters() + if bn1.affine and bn2.affine: + bn2.weight.data.copy_(bn1.weight.data) + bn2.bias.data.copy_(bn1.bias.data) + + def _checkBatchNormResult(self, bn1, bn2, input, is_train, cuda=False): + """Check the forward and backward for the customized batch normalization.""" + bn1.train(mode=is_train) + bn2.train(mode=is_train) + + if cuda: + input = input.cuda() + + self._syncParameters(_find_bn(bn1), _find_bn(bn2)) + + input1 = Variable(input, requires_grad=True) + output1 = bn1(input1) + output1.sum().backward() + input2 = Variable(input, requires_grad=True) + output2 = bn2(input2) + output2.sum().backward() + + self.assertTensorClose(input1.data, input2.data) + self.assertTensorClose(output1.data, output2.data) + self.assertTensorClose(input1.grad, input2.grad) + self.assertTensorClose(_find_bn(bn1).running_mean, _find_bn(bn2).running_mean) + self.assertTensorClose(_find_bn(bn1).running_var, _find_bn(bn2).running_var) + + def testSyncBatchNormNormalTrain(self): + bn = nn.BatchNorm1d(10) + sync_bn = SynchronizedBatchNorm1d(10) + + self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True) + + def testSyncBatchNormNormalEval(self): + bn = nn.BatchNorm1d(10) + sync_bn = SynchronizedBatchNorm1d(10) + + self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False) + + def testSyncBatchNormSyncTrain(self): + bn = nn.BatchNorm1d(10, eps=1e-5, affine=False) + sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) + sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) + + bn.cuda() + sync_bn.cuda() + + self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), True, cuda=True) + + def testSyncBatchNormSyncEval(self): + bn = nn.BatchNorm1d(10, eps=1e-5, affine=False) + sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) + sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) + + bn.cuda() + sync_bn.cuda() + + self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10), False, cuda=True) + + def testSyncBatchNorm2DSyncTrain(self): + bn = nn.BatchNorm2d(10) + sync_bn = SynchronizedBatchNorm2d(10) + sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) + + bn.cuda() + sync_bn.cuda() + + self._checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16), True, cuda=True) + + +if __name__ == '__main__': + unittest.main() diff --git a/lama/models/ade20k/segm_lib/nn/modules/unittest.py b/lama/models/ade20k/segm_lib/nn/modules/unittest.py new file mode 100644 index 0000000000000000000000000000000000000000..0675c022e4ba85d38d1f813490f6740150909524 --- /dev/null +++ b/lama/models/ade20k/segm_lib/nn/modules/unittest.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# File : unittest.py +# Author : Jiayuan Mao +# Email : maojiayuan@gmail.com +# Date : 27/01/2018 +# +# This file is part of 
Synchronized-BatchNorm-PyTorch. +# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch +# Distributed under MIT License. + +import unittest + +import numpy as np +from torch.autograd import Variable + + +def as_numpy(v): + if isinstance(v, Variable): + v = v.data + return v.cpu().numpy() + + +class TorchTestCase(unittest.TestCase): + def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3): + npa, npb = as_numpy(a), as_numpy(b) + self.assertTrue( + np.allclose(npa, npb, atol=atol), + 'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max()) + ) diff --git a/lama/models/ade20k/segm_lib/nn/parallel/__init__.py b/lama/models/ade20k/segm_lib/nn/parallel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9b52f49cc0755562218a460483cbf02514ddd773 --- /dev/null +++ b/lama/models/ade20k/segm_lib/nn/parallel/__init__.py @@ -0,0 +1 @@ +from .data_parallel import UserScatteredDataParallel, user_scattered_collate, async_copy_to diff --git a/lama/models/ade20k/segm_lib/nn/parallel/data_parallel.py b/lama/models/ade20k/segm_lib/nn/parallel/data_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..376fc038919aa2a5bd696141e7bb6025d4981306 --- /dev/null +++ b/lama/models/ade20k/segm_lib/nn/parallel/data_parallel.py @@ -0,0 +1,112 @@ +# -*- coding: utf8 -*- + +import torch.cuda as cuda +import torch.nn as nn +import torch +import collections +from torch.nn.parallel._functions import Gather + + +__all__ = ['UserScatteredDataParallel', 'user_scattered_collate', 'async_copy_to'] + + +def async_copy_to(obj, dev, main_stream=None): + if torch.is_tensor(obj): + v = obj.cuda(dev, non_blocking=True) + if main_stream is not None: + v.data.record_stream(main_stream) + return v + elif isinstance(obj, collections.Mapping): + return {k: async_copy_to(o, dev, main_stream) for k, o in obj.items()} + elif isinstance(obj, collections.Sequence): + return [async_copy_to(o, dev, main_stream) for o in obj] + else: + return obj + + +def dict_gather(outputs, target_device, dim=0): + """ + Gathers variables from different GPUs on a specified device + (-1 means the CPU), with dictionary support. 
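Editor's note: assertTensorClose above converts both operands with as_numpy and compares them with np.allclose at atol=1e-3, so small numerical drift between two implementations is accepted. A hypothetical usage sketch (the class name _ExampleCase is illustrative, not from the repo, and assumes TorchTestCase as defined above is importable):

import torch

class _ExampleCase(TorchTestCase):
    def test_tolerance(self):
        a = torch.ones(4)
        self.assertTensorClose(a, a + 1e-4)   # passes: difference well within atol=1e-3

# running via unittest.main() would discover and execute it as usual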
+ """ + def gather_map(outputs): + out = outputs[0] + if torch.is_tensor(out): + # MJY(20180330) HACK:: force nr_dims > 0 + if out.dim() == 0: + outputs = [o.unsqueeze(0) for o in outputs] + return Gather.apply(target_device, dim, *outputs) + elif out is None: + return None + elif isinstance(out, collections.Mapping): + return {k: gather_map([o[k] for o in outputs]) for k in out} + elif isinstance(out, collections.Sequence): + return type(out)(map(gather_map, zip(*outputs))) + return gather_map(outputs) + + +class DictGatherDataParallel(nn.DataParallel): + def gather(self, outputs, output_device): + return dict_gather(outputs, output_device, dim=self.dim) + + +class UserScatteredDataParallel(DictGatherDataParallel): + def scatter(self, inputs, kwargs, device_ids): + assert len(inputs) == 1 + inputs = inputs[0] + inputs = _async_copy_stream(inputs, device_ids) + inputs = [[i] for i in inputs] + assert len(kwargs) == 0 + kwargs = [{} for _ in range(len(inputs))] + + return inputs, kwargs + + +def user_scattered_collate(batch): + return batch + + +def _async_copy(inputs, device_ids): + nr_devs = len(device_ids) + assert type(inputs) in (tuple, list) + assert len(inputs) == nr_devs + + outputs = [] + for i, dev in zip(inputs, device_ids): + with cuda.device(dev): + outputs.append(async_copy_to(i, dev)) + + return tuple(outputs) + + +def _async_copy_stream(inputs, device_ids): + nr_devs = len(device_ids) + assert type(inputs) in (tuple, list) + assert len(inputs) == nr_devs + + outputs = [] + streams = [_get_stream(d) for d in device_ids] + for i, dev, stream in zip(inputs, device_ids, streams): + with cuda.device(dev): + main_stream = cuda.current_stream() + with cuda.stream(stream): + outputs.append(async_copy_to(i, dev, main_stream=main_stream)) + main_stream.wait_stream(stream) + + return outputs + + +"""Adapted from: torch/nn/parallel/_functions.py""" +# background streams used for copying +_streams = None + + +def _get_stream(device): + """Gets a background stream for copying between CPU and GPU""" + global _streams + if device == -1: + return None + if _streams is None: + _streams = [None] * cuda.device_count() + if _streams[device] is None: _streams[device] = cuda.Stream(device) + return _streams[device] diff --git a/lama/models/ade20k/segm_lib/utils/__init__.py b/lama/models/ade20k/segm_lib/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..abe3cbe49477fe37d4fc16249de8a10f4fb4a013 --- /dev/null +++ b/lama/models/ade20k/segm_lib/utils/__init__.py @@ -0,0 +1 @@ +from .th import * diff --git a/lama/models/ade20k/segm_lib/utils/data/__init__.py b/lama/models/ade20k/segm_lib/utils/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f3b008fb13c5e8a84b1b785056e8c4f5226dc976 --- /dev/null +++ b/lama/models/ade20k/segm_lib/utils/data/__init__.py @@ -0,0 +1,3 @@ + +from .dataset import Dataset, TensorDataset, ConcatDataset +from .dataloader import DataLoader diff --git a/lama/models/ade20k/segm_lib/utils/data/dataloader.py b/lama/models/ade20k/segm_lib/utils/data/dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..039b9ec3645b2a4626ff47c221e372f32a6ad339 --- /dev/null +++ b/lama/models/ade20k/segm_lib/utils/data/dataloader.py @@ -0,0 +1,425 @@ +import torch +import torch.multiprocessing as multiprocessing +from torch._C import _set_worker_signal_handlers, \ + _remove_worker_pids, _error_if_any_worker_fails +try: + from torch._C import _set_worker_pids +except: + from torch._C import 
_update_worker_pids as _set_worker_pids +from .sampler import SequentialSampler, RandomSampler, BatchSampler +import signal +import collections +import re +import sys +import threading +import traceback +from torch._six import string_classes, int_classes +import numpy as np + +if sys.version_info[0] == 2: + import Queue as queue +else: + import queue + + +class ExceptionWrapper(object): + r"Wraps an exception plus traceback to communicate across threads" + + def __init__(self, exc_info): + self.exc_type = exc_info[0] + self.exc_msg = "".join(traceback.format_exception(*exc_info)) + + +_use_shared_memory = False +"""Whether to use shared memory in default_collate""" + + +def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id): + global _use_shared_memory + _use_shared_memory = True + + # Intialize C side signal handlers for SIGBUS and SIGSEGV. Python signal + # module's handlers are executed after Python returns from C low-level + # handlers, likely when the same fatal signal happened again already. + # https://docs.python.org/3/library/signal.html Sec. 18.8.1.1 + _set_worker_signal_handlers() + + torch.set_num_threads(1) + torch.manual_seed(seed) + np.random.seed(seed) + + if init_fn is not None: + init_fn(worker_id) + + while True: + r = index_queue.get() + if r is None: + break + idx, batch_indices = r + try: + samples = collate_fn([dataset[i] for i in batch_indices]) + except Exception: + data_queue.put((idx, ExceptionWrapper(sys.exc_info()))) + else: + data_queue.put((idx, samples)) + + +def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id): + if pin_memory: + torch.cuda.set_device(device_id) + + while True: + try: + r = in_queue.get() + except Exception: + if done_event.is_set(): + return + raise + if r is None: + break + if isinstance(r[1], ExceptionWrapper): + out_queue.put(r) + continue + idx, batch = r + try: + if pin_memory: + batch = pin_memory_batch(batch) + except Exception: + out_queue.put((idx, ExceptionWrapper(sys.exc_info()))) + else: + out_queue.put((idx, batch)) + +numpy_type_map = { + 'float64': torch.DoubleTensor, + 'float32': torch.FloatTensor, + 'float16': torch.HalfTensor, + 'int64': torch.LongTensor, + 'int32': torch.IntTensor, + 'int16': torch.ShortTensor, + 'int8': torch.CharTensor, + 'uint8': torch.ByteTensor, +} + + +def default_collate(batch): + "Puts each data field into a tensor with outer dimension batch size" + + error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" + elem_type = type(batch[0]) + if torch.is_tensor(batch[0]): + out = None + if _use_shared_memory: + # If we're in a background process, concatenate directly into a + # shared memory tensor to avoid an extra copy + numel = sum([x.numel() for x in batch]) + storage = batch[0].storage()._new_shared(numel) + out = batch[0].new(storage) + return torch.stack(batch, 0, out=out) + elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ + and elem_type.__name__ != 'string_': + elem = batch[0] + if elem_type.__name__ == 'ndarray': + # array of string classes and object + if re.search('[SaUO]', elem.dtype.str) is not None: + raise TypeError(error_msg.format(elem.dtype)) + + return torch.stack([torch.from_numpy(b) for b in batch], 0) + if elem.shape == (): # scalars + py_type = float if elem.dtype.name.startswith('float') else int + return numpy_type_map[elem.dtype.name](list(map(py_type, batch))) + elif isinstance(batch[0], int_classes): + return torch.LongTensor(batch) + elif isinstance(batch[0], float): + 
return torch.DoubleTensor(batch) + elif isinstance(batch[0], string_classes): + return batch + elif isinstance(batch[0], collections.Mapping): + return {key: default_collate([d[key] for d in batch]) for key in batch[0]} + elif isinstance(batch[0], collections.Sequence): + transposed = zip(*batch) + return [default_collate(samples) for samples in transposed] + + raise TypeError((error_msg.format(type(batch[0])))) + + +def pin_memory_batch(batch): + if torch.is_tensor(batch): + return batch.pin_memory() + elif isinstance(batch, string_classes): + return batch + elif isinstance(batch, collections.Mapping): + return {k: pin_memory_batch(sample) for k, sample in batch.items()} + elif isinstance(batch, collections.Sequence): + return [pin_memory_batch(sample) for sample in batch] + else: + return batch + + +_SIGCHLD_handler_set = False +"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one +handler needs to be set for all DataLoaders in a process.""" + + +def _set_SIGCHLD_handler(): + # Windows doesn't support SIGCHLD handler + if sys.platform == 'win32': + return + # can't set signal in child threads + if not isinstance(threading.current_thread(), threading._MainThread): + return + global _SIGCHLD_handler_set + if _SIGCHLD_handler_set: + return + previous_handler = signal.getsignal(signal.SIGCHLD) + if not callable(previous_handler): + previous_handler = None + + def handler(signum, frame): + # This following call uses `waitid` with WNOHANG from C side. Therefore, + # Python can still get and update the process status successfully. + _error_if_any_worker_fails() + if previous_handler is not None: + previous_handler(signum, frame) + + signal.signal(signal.SIGCHLD, handler) + _SIGCHLD_handler_set = True + + +class DataLoaderIter(object): + "Iterates once over the DataLoader's dataset, as specified by the sampler" + + def __init__(self, loader): + self.dataset = loader.dataset + self.collate_fn = loader.collate_fn + self.batch_sampler = loader.batch_sampler + self.num_workers = loader.num_workers + self.pin_memory = loader.pin_memory and torch.cuda.is_available() + self.timeout = loader.timeout + self.done_event = threading.Event() + + self.sample_iter = iter(self.batch_sampler) + + if self.num_workers > 0: + self.worker_init_fn = loader.worker_init_fn + self.index_queue = multiprocessing.SimpleQueue() + self.worker_result_queue = multiprocessing.SimpleQueue() + self.batches_outstanding = 0 + self.worker_pids_set = False + self.shutdown = False + self.send_idx = 0 + self.rcvd_idx = 0 + self.reorder_dict = {} + + base_seed = torch.LongTensor(1).random_(0, 2**31-1)[0] + self.workers = [ + multiprocessing.Process( + target=_worker_loop, + args=(self.dataset, self.index_queue, self.worker_result_queue, self.collate_fn, + base_seed + i, self.worker_init_fn, i)) + for i in range(self.num_workers)] + + if self.pin_memory or self.timeout > 0: + self.data_queue = queue.Queue() + if self.pin_memory: + maybe_device_id = torch.cuda.current_device() + else: + # do not initialize cuda context if not necessary + maybe_device_id = None + self.worker_manager_thread = threading.Thread( + target=_worker_manager_loop, + args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory, + maybe_device_id)) + self.worker_manager_thread.daemon = True + self.worker_manager_thread.start() + else: + self.data_queue = self.worker_result_queue + + for w in self.workers: + w.daemon = True # ensure that the worker exits on process exit + w.start() + + _set_worker_pids(id(self), tuple(w.pid 
for w in self.workers)) + _set_SIGCHLD_handler() + self.worker_pids_set = True + + # prime the prefetch loop + for _ in range(2 * self.num_workers): + self._put_indices() + + def __len__(self): + return len(self.batch_sampler) + + def _get_batch(self): + if self.timeout > 0: + try: + return self.data_queue.get(timeout=self.timeout) + except queue.Empty: + raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout)) + else: + return self.data_queue.get() + + def __next__(self): + if self.num_workers == 0: # same-process loading + indices = next(self.sample_iter) # may raise StopIteration + batch = self.collate_fn([self.dataset[i] for i in indices]) + if self.pin_memory: + batch = pin_memory_batch(batch) + return batch + + # check if the next sample has already been generated + if self.rcvd_idx in self.reorder_dict: + batch = self.reorder_dict.pop(self.rcvd_idx) + return self._process_next_batch(batch) + + if self.batches_outstanding == 0: + self._shutdown_workers() + raise StopIteration + + while True: + assert (not self.shutdown and self.batches_outstanding > 0) + idx, batch = self._get_batch() + self.batches_outstanding -= 1 + if idx != self.rcvd_idx: + # store out-of-order samples + self.reorder_dict[idx] = batch + continue + return self._process_next_batch(batch) + + next = __next__ # Python 2 compatibility + + def __iter__(self): + return self + + def _put_indices(self): + assert self.batches_outstanding < 2 * self.num_workers + indices = next(self.sample_iter, None) + if indices is None: + return + self.index_queue.put((self.send_idx, indices)) + self.batches_outstanding += 1 + self.send_idx += 1 + + def _process_next_batch(self, batch): + self.rcvd_idx += 1 + self._put_indices() + if isinstance(batch, ExceptionWrapper): + raise batch.exc_type(batch.exc_msg) + return batch + + def __getstate__(self): + # TODO: add limited pickling support for sharing an iterator + # across multiple threads for HOGWILD. + # Probably the best way to do this is by moving the sample pushing + # to a separate thread and then just sharing the data queue + # but signalling the end is tricky without a non-blocking API + raise NotImplementedError("DataLoaderIterator cannot be pickled") + + def _shutdown_workers(self): + try: + if not self.shutdown: + self.shutdown = True + self.done_event.set() + # if worker_manager_thread is waiting to put + while not self.data_queue.empty(): + self.data_queue.get() + for _ in self.workers: + self.index_queue.put(None) + # done_event should be sufficient to exit worker_manager_thread, + # but be safe here and put another None + self.worker_result_queue.put(None) + finally: + # removes pids no matter what + if self.worker_pids_set: + _remove_worker_pids(id(self)) + self.worker_pids_set = False + + def __del__(self): + if self.num_workers > 0: + self._shutdown_workers() + + +class DataLoader(object): + """ + Data loader. Combines a dataset and a sampler, and provides + single- or multi-process iterators over the dataset. + + Arguments: + dataset (Dataset): dataset from which to load the data. + batch_size (int, optional): how many samples per batch to load + (default: 1). + shuffle (bool, optional): set to ``True`` to have the data reshuffled + at every epoch (default: False). + sampler (Sampler, optional): defines the strategy to draw samples from + the dataset. If specified, ``shuffle`` must be False. + batch_sampler (Sampler, optional): like sampler, but returns a batch of + indices at a time. 
Mutually exclusive with batch_size, shuffle, + sampler, and drop_last. + num_workers (int, optional): how many subprocesses to use for data + loading. 0 means that the data will be loaded in the main process. + (default: 0) + collate_fn (callable, optional): merges a list of samples to form a mini-batch. + pin_memory (bool, optional): If ``True``, the data loader will copy tensors + into CUDA pinned memory before returning them. + drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If ``False`` and + the size of dataset is not divisible by the batch size, then the last batch + will be smaller. (default: False) + timeout (numeric, optional): if positive, the timeout value for collecting a batch + from workers. Should always be non-negative. (default: 0) + worker_init_fn (callable, optional): If not None, this will be called on each + worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as + input, after seeding and before data loading. (default: None) + + .. note:: By default, each worker will have its PyTorch seed set to + ``base_seed + worker_id``, where ``base_seed`` is a long generated + by main process using its RNG. You may use ``torch.initial_seed()`` to access + this value in :attr:`worker_init_fn`, which can be used to set other seeds + (e.g. NumPy) before data loading. + + .. warning:: If ``spawn'' start method is used, :attr:`worker_init_fn` cannot be an + unpicklable object, e.g., a lambda function. + """ + + def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, + num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False, + timeout=0, worker_init_fn=None): + self.dataset = dataset + self.batch_size = batch_size + self.num_workers = num_workers + self.collate_fn = collate_fn + self.pin_memory = pin_memory + self.drop_last = drop_last + self.timeout = timeout + self.worker_init_fn = worker_init_fn + + if timeout < 0: + raise ValueError('timeout option should be non-negative') + + if batch_sampler is not None: + if batch_size > 1 or shuffle or sampler is not None or drop_last: + raise ValueError('batch_sampler is mutually exclusive with ' + 'batch_size, shuffle, sampler, and drop_last') + + if sampler is not None and shuffle: + raise ValueError('sampler is mutually exclusive with shuffle') + + if self.num_workers < 0: + raise ValueError('num_workers cannot be negative; ' + 'use num_workers=0 to disable multiprocessing.') + + if batch_sampler is None: + if sampler is None: + if shuffle: + sampler = RandomSampler(dataset) + else: + sampler = SequentialSampler(dataset) + batch_sampler = BatchSampler(sampler, batch_size, drop_last) + + self.sampler = sampler + self.batch_sampler = batch_sampler + + def __iter__(self): + return DataLoaderIter(self) + + def __len__(self): + return len(self.batch_sampler) diff --git a/lama/models/ade20k/segm_lib/utils/data/dataset.py b/lama/models/ade20k/segm_lib/utils/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..605aa877f7031a5cd2b98c0f831410aa80fddefa --- /dev/null +++ b/lama/models/ade20k/segm_lib/utils/data/dataset.py @@ -0,0 +1,118 @@ +import bisect +import warnings + +from torch._utils import _accumulate +from torch import randperm + + +class Dataset(object): + """An abstract class representing a Dataset. + + All other datasets should subclass it. 
All subclasses should override + ``__len__``, that provides the size of the dataset, and ``__getitem__``, + supporting integer indexing in range from 0 to len(self) exclusive. + """ + + def __getitem__(self, index): + raise NotImplementedError + + def __len__(self): + raise NotImplementedError + + def __add__(self, other): + return ConcatDataset([self, other]) + + +class TensorDataset(Dataset): + """Dataset wrapping data and target tensors. + + Each sample will be retrieved by indexing both tensors along the first + dimension. + + Arguments: + data_tensor (Tensor): contains sample data. + target_tensor (Tensor): contains sample targets (labels). + """ + + def __init__(self, data_tensor, target_tensor): + assert data_tensor.size(0) == target_tensor.size(0) + self.data_tensor = data_tensor + self.target_tensor = target_tensor + + def __getitem__(self, index): + return self.data_tensor[index], self.target_tensor[index] + + def __len__(self): + return self.data_tensor.size(0) + + +class ConcatDataset(Dataset): + """ + Dataset to concatenate multiple datasets. + Purpose: useful to assemble different existing datasets, possibly + large-scale datasets as the concatenation operation is done in an + on-the-fly manner. + + Arguments: + datasets (iterable): List of datasets to be concatenated + """ + + @staticmethod + def cumsum(sequence): + r, s = [], 0 + for e in sequence: + l = len(e) + r.append(l + s) + s += l + return r + + def __init__(self, datasets): + super(ConcatDataset, self).__init__() + assert len(datasets) > 0, 'datasets should not be an empty iterable' + self.datasets = list(datasets) + self.cumulative_sizes = self.cumsum(self.datasets) + + def __len__(self): + return self.cumulative_sizes[-1] + + def __getitem__(self, idx): + dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) + if dataset_idx == 0: + sample_idx = idx + else: + sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] + return self.datasets[dataset_idx][sample_idx] + + @property + def cummulative_sizes(self): + warnings.warn("cummulative_sizes attribute is renamed to " + "cumulative_sizes", DeprecationWarning, stacklevel=2) + return self.cumulative_sizes + + +class Subset(Dataset): + def __init__(self, dataset, indices): + self.dataset = dataset + self.indices = indices + + def __getitem__(self, idx): + return self.dataset[self.indices[idx]] + + def __len__(self): + return len(self.indices) + + +def random_split(dataset, lengths): + """ + Randomly split a dataset into non-overlapping new datasets of given lengths + ds + + Arguments: + dataset (Dataset): Dataset to be split + lengths (iterable): lengths of splits to be produced + """ + if sum(lengths) != len(dataset): + raise ValueError("Sum of input lengths does not equal the length of the input dataset!") + + indices = randperm(sum(lengths)) + return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)] diff --git a/lama/models/ade20k/segm_lib/utils/data/distributed.py b/lama/models/ade20k/segm_lib/utils/data/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..c3d890e28fd2b9e044bdd9494de4a43ad2471eed --- /dev/null +++ b/lama/models/ade20k/segm_lib/utils/data/distributed.py @@ -0,0 +1,58 @@ +import math +import torch +from .sampler import Sampler +from torch.distributed import get_world_size, get_rank + + +class DistributedSampler(Sampler): + """Sampler that restricts data loading to a subset of the dataset. 
+ + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each + process can pass a DistributedSampler instance as a DataLoader sampler, + and load a subset of the original dataset that is exclusive to it. + + .. note:: + Dataset is assumed to be of constant size. + + Arguments: + dataset: Dataset used for sampling. + num_replicas (optional): Number of processes participating in + distributed training. + rank (optional): Rank of the current process within num_replicas. + """ + + def __init__(self, dataset, num_replicas=None, rank=None): + if num_replicas is None: + num_replicas = get_world_size() + if rank is None: + rank = get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = list(torch.randperm(len(self.dataset), generator=g)) + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset:offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch diff --git a/lama/models/ade20k/segm_lib/utils/data/sampler.py b/lama/models/ade20k/segm_lib/utils/data/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..62a9a43bd1d4c21fbdcb262db7da8d4fe27b26de --- /dev/null +++ b/lama/models/ade20k/segm_lib/utils/data/sampler.py @@ -0,0 +1,131 @@ +import torch + + +class Sampler(object): + """Base class for all Samplers. + + Every Sampler subclass has to provide an __iter__ method, providing a way + to iterate over indices of dataset elements, and a __len__ method that + returns the length of the returned iterators. + """ + + def __init__(self, data_source): + pass + + def __iter__(self): + raise NotImplementedError + + def __len__(self): + raise NotImplementedError + + +class SequentialSampler(Sampler): + """Samples elements sequentially, always in the same order. + + Arguments: + data_source (Dataset): dataset to sample from + """ + + def __init__(self, data_source): + self.data_source = data_source + + def __iter__(self): + return iter(range(len(self.data_source))) + + def __len__(self): + return len(self.data_source) + + +class RandomSampler(Sampler): + """Samples elements randomly, without replacement. + + Arguments: + data_source (Dataset): dataset to sample from + """ + + def __init__(self, data_source): + self.data_source = data_source + + def __iter__(self): + return iter(torch.randperm(len(self.data_source)).long()) + + def __len__(self): + return len(self.data_source) + + +class SubsetRandomSampler(Sampler): + """Samples elements randomly from a given list of indices, without replacement. + + Arguments: + indices (list): a list of indices + """ + + def __init__(self, indices): + self.indices = indices + + def __iter__(self): + return (self.indices[i] for i in torch.randperm(len(self.indices))) + + def __len__(self): + return len(self.indices) + + +class WeightedRandomSampler(Sampler): + """Samples elements from [0,..,len(weights)-1] with given probabilities (weights). 
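Editor's note: the DistributedSampler above gives each rank a disjoint slice of an epoch-seeded permutation and pads the index list so every rank draws the same number of samples; set_epoch has to be called once per epoch, otherwise every epoch reuses the seed-0 shuffle. A small sketch with explicit num_replicas/rank so no process group is needed (illustrative only):

sampler = DistributedSampler(list(range(10)), num_replicas=4, rank=0)
print(len(sampler))            # 3 == ceil(10 / 4)
for epoch in range(2):
    sampler.set_epoch(epoch)   # reseeds the permutation deterministically per epoch
    indices = list(sampler)    # this rank's padded, disjoint share of the dataset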
+ + Arguments: + weights (list) : a list of weights, not necessary summing up to one + num_samples (int): number of samples to draw + replacement (bool): if ``True``, samples are drawn with replacement. + If not, they are drawn without replacement, which means that when a + sample index is drawn for a row, it cannot be drawn again for that row. + """ + + def __init__(self, weights, num_samples, replacement=True): + self.weights = torch.DoubleTensor(weights) + self.num_samples = num_samples + self.replacement = replacement + + def __iter__(self): + return iter(torch.multinomial(self.weights, self.num_samples, self.replacement)) + + def __len__(self): + return self.num_samples + + +class BatchSampler(object): + """Wraps another sampler to yield a mini-batch of indices. + + Args: + sampler (Sampler): Base sampler. + batch_size (int): Size of mini-batch. + drop_last (bool): If ``True``, the sampler will drop the last batch if + its size would be less than ``batch_size`` + + Example: + >>> list(BatchSampler(range(10), batch_size=3, drop_last=False)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] + >>> list(BatchSampler(range(10), batch_size=3, drop_last=True)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + """ + + def __init__(self, sampler, batch_size, drop_last): + self.sampler = sampler + self.batch_size = batch_size + self.drop_last = drop_last + + def __iter__(self): + batch = [] + for idx in self.sampler: + batch.append(idx) + if len(batch) == self.batch_size: + yield batch + batch = [] + if len(batch) > 0 and not self.drop_last: + yield batch + + def __len__(self): + if self.drop_last: + return len(self.sampler) // self.batch_size + else: + return (len(self.sampler) + self.batch_size - 1) // self.batch_size diff --git a/lama/models/ade20k/segm_lib/utils/th.py b/lama/models/ade20k/segm_lib/utils/th.py new file mode 100644 index 0000000000000000000000000000000000000000..ca6ef9385e3b5c0a439579d3fd7aa73b5dc62758 --- /dev/null +++ b/lama/models/ade20k/segm_lib/utils/th.py @@ -0,0 +1,41 @@ +import torch +from torch.autograd import Variable +import numpy as np +import collections + +__all__ = ['as_variable', 'as_numpy', 'mark_volatile'] + +def as_variable(obj): + if isinstance(obj, Variable): + return obj + if isinstance(obj, collections.Sequence): + return [as_variable(v) for v in obj] + elif isinstance(obj, collections.Mapping): + return {k: as_variable(v) for k, v in obj.items()} + else: + return Variable(obj) + +def as_numpy(obj): + if isinstance(obj, collections.Sequence): + return [as_numpy(v) for v in obj] + elif isinstance(obj, collections.Mapping): + return {k: as_numpy(v) for k, v in obj.items()} + elif isinstance(obj, Variable): + return obj.data.cpu().numpy() + elif torch.is_tensor(obj): + return obj.cpu().numpy() + else: + return np.array(obj) + +def mark_volatile(obj): + if torch.is_tensor(obj): + obj = Variable(obj) + if isinstance(obj, Variable): + obj.no_grad = True + return obj + elif isinstance(obj, collections.Mapping): + return {k: mark_volatile(o) for k, o in obj.items()} + elif isinstance(obj, collections.Sequence): + return [mark_volatile(o) for o in obj] + else: + return obj diff --git a/lama/models/ade20k/utils.py b/lama/models/ade20k/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f337db7db54c82be041698d694e1403e8918c4c0 --- /dev/null +++ b/lama/models/ade20k/utils.py @@ -0,0 +1,40 @@ +"""Modified from https://github.com/CSAILVision/semantic-segmentation-pytorch""" + +import os +import sys + +import numpy as np +import torch + +try: + from urllib import 
urlretrieve +except ImportError: + from urllib.request import urlretrieve + + +def load_url(url, model_dir='./pretrained', map_location=None): + if not os.path.exists(model_dir): + os.makedirs(model_dir) + filename = url.split('/')[-1] + cached_file = os.path.join(model_dir, filename) + if not os.path.exists(cached_file): + sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file)) + urlretrieve(url, cached_file) + return torch.load(cached_file, map_location=map_location) + + +def color_encode(labelmap, colors, mode='RGB'): + labelmap = labelmap.astype('int') + labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3), + dtype=np.uint8) + for label in np.unique(labelmap): + if label < 0: + continue + labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \ + np.tile(colors[label], + (labelmap.shape[0], labelmap.shape[1], 1)) + + if mode == 'BGR': + return labelmap_rgb[:, :, ::-1] + else: + return labelmap_rgb diff --git a/lama/models/lpips_models/alex.pth b/lama/models/lpips_models/alex.pth new file mode 100644 index 0000000000000000000000000000000000000000..fa4067abc5d4da16a7204fd94776506e4868030e --- /dev/null +++ b/lama/models/lpips_models/alex.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df73285e35b22355a2df87cdb6b70b343713b667eddbda73e1977e0c860835c0 +size 6009 diff --git a/lama/models/lpips_models/squeeze.pth b/lama/models/lpips_models/squeeze.pth new file mode 100644 index 0000000000000000000000000000000000000000..f892a84a130828b1c9e2e8156e84fc5a962c665d --- /dev/null +++ b/lama/models/lpips_models/squeeze.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a5350f23600cb79923ce65bb07cbf57dca461329894153e05a1346bd531cf76 +size 10811 diff --git a/lama/models/lpips_models/vgg.pth b/lama/models/lpips_models/vgg.pth new file mode 100644 index 0000000000000000000000000000000000000000..f57dcf5cc764d61c8a460365847fb2137ff0a62d --- /dev/null +++ b/lama/models/lpips_models/vgg.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a78928a0af1e5f0fcb1f3b9e8f8c3a2a5a3de244d830ad5c1feddc79b8432868 +size 7289 diff --git a/lama/outputs/2022-12-06/10-42-09/.hydra/config.yaml b/lama/outputs/2022-12-06/10-42-09/.hydra/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f2fa332606f4dee40e0405777b359df85ea174df --- /dev/null +++ b/lama/outputs/2022-12-06/10-42-09/.hydra/config.yaml @@ -0,0 +1,20 @@ +indir: /home/vinker/dev/lama/lama/LaMa_test_images +outdir: /home/vinker/dev/lama/lama/output +model: + path: /home/vinker/dev/lama/lama/big-lama + checkpoint: best.ckpt +dataset: + kind: default + img_suffix: .png + pad_out_to_modulo: 8 +device: cuda +out_key: inpainted +refine: false +refiner: + gpu_ids: 0,1 + modulo: ${dataset.pad_out_to_modulo} + n_iters: 15 + lr: 0.002 + min_side: 512 + max_scales: 3 + px_budget: 1800000 diff --git a/lama/outputs/2022-12-06/10-42-09/.hydra/hydra.yaml b/lama/outputs/2022-12-06/10-42-09/.hydra/hydra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..67d78f5d56e3accf1b4bb7ab86a2d5c1f2d632a8 --- /dev/null +++ b/lama/outputs/2022-12-06/10-42-09/.hydra/hydra.yaml @@ -0,0 +1,151 @@ +hydra: + run: + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + help: + app_name: 
${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][HYDRA] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + loggers: + logging_example: + level: DEBUG + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + searchpath: [] + callbacks: {} + output_subdir: .hydra + overrides: + hydra: [] + task: + - model.path=/home/vinker/dev/lama/lama/big-lama + - indir=/home/vinker/dev/lama/lama/LaMa_test_images + - outdir=/home/vinker/dev/lama/lama/output + job: + name: predict + override_dirname: indir=/home/vinker/dev/lama/lama/LaMa_test_images,model.path=/home/vinker/dev/lama/lama/big-lama,outdir=/home/vinker/dev/lama/lama/output + id: ??? + num: ??? 
+ config_name: default.yaml + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.1.0 + cwd: /home/vinker/dev/lama/lama + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/vinker/dev/lama/lama/configs/prediction + schema: file + provider: main + - path: '' + schema: structured + provider: schema + choices: + hydra/env: default + hydra/callbacks: null + hydra/job_logging: default + hydra/hydra_logging: default + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/lama/outputs/2022-12-06/10-42-09/.hydra/overrides.yaml b/lama/outputs/2022-12-06/10-42-09/.hydra/overrides.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d71afabfe9f6f0f60ac5a5758b78ba2306ef2815 --- /dev/null +++ b/lama/outputs/2022-12-06/10-42-09/.hydra/overrides.yaml @@ -0,0 +1,3 @@ +- model.path=/home/vinker/dev/lama/lama/big-lama +- indir=/home/vinker/dev/lama/lama/LaMa_test_images +- outdir=/home/vinker/dev/lama/lama/output diff --git a/lama/outputs/2022-12-06/10-43-47/.hydra/config.yaml b/lama/outputs/2022-12-06/10-43-47/.hydra/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f2fa332606f4dee40e0405777b359df85ea174df --- /dev/null +++ b/lama/outputs/2022-12-06/10-43-47/.hydra/config.yaml @@ -0,0 +1,20 @@ +indir: /home/vinker/dev/lama/lama/LaMa_test_images +outdir: /home/vinker/dev/lama/lama/output +model: + path: /home/vinker/dev/lama/lama/big-lama + checkpoint: best.ckpt +dataset: + kind: default + img_suffix: .png + pad_out_to_modulo: 8 +device: cuda +out_key: inpainted +refine: false +refiner: + gpu_ids: 0,1 + modulo: ${dataset.pad_out_to_modulo} + n_iters: 15 + lr: 0.002 + min_side: 512 + max_scales: 3 + px_budget: 1800000 diff --git a/lama/outputs/2022-12-06/10-43-47/.hydra/hydra.yaml b/lama/outputs/2022-12-06/10-43-47/.hydra/hydra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..67d78f5d56e3accf1b4bb7ab86a2d5c1f2d632a8 --- /dev/null +++ b/lama/outputs/2022-12-06/10-43-47/.hydra/hydra.yaml @@ -0,0 +1,151 @@ +hydra: + run: + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? 
+ hydra_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][HYDRA] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + loggers: + logging_example: + level: DEBUG + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + searchpath: [] + callbacks: {} + output_subdir: .hydra + overrides: + hydra: [] + task: + - model.path=/home/vinker/dev/lama/lama/big-lama + - indir=/home/vinker/dev/lama/lama/LaMa_test_images + - outdir=/home/vinker/dev/lama/lama/output + job: + name: predict + override_dirname: indir=/home/vinker/dev/lama/lama/LaMa_test_images,model.path=/home/vinker/dev/lama/lama/big-lama,outdir=/home/vinker/dev/lama/lama/output + id: ??? + num: ??? + config_name: default.yaml + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.1.0 + cwd: /home/vinker/dev/lama/lama + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/vinker/dev/lama/lama/configs/prediction + schema: file + provider: main + - path: '' + schema: structured + provider: schema + choices: + hydra/env: default + hydra/callbacks: null + hydra/job_logging: default + hydra/hydra_logging: default + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/lama/outputs/2022-12-06/10-43-47/.hydra/overrides.yaml b/lama/outputs/2022-12-06/10-43-47/.hydra/overrides.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d71afabfe9f6f0f60ac5a5758b78ba2306ef2815 --- /dev/null +++ b/lama/outputs/2022-12-06/10-43-47/.hydra/overrides.yaml @@ -0,0 +1,3 @@ +- model.path=/home/vinker/dev/lama/lama/big-lama +- indir=/home/vinker/dev/lama/lama/LaMa_test_images +- outdir=/home/vinker/dev/lama/lama/output diff --git a/lama/outputs/2022-12-06/10-44-21/.hydra/config.yaml b/lama/outputs/2022-12-06/10-44-21/.hydra/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f2fa332606f4dee40e0405777b359df85ea174df --- /dev/null +++ b/lama/outputs/2022-12-06/10-44-21/.hydra/config.yaml @@ -0,0 +1,20 @@ +indir: /home/vinker/dev/lama/lama/LaMa_test_images +outdir: /home/vinker/dev/lama/lama/output +model: + path: /home/vinker/dev/lama/lama/big-lama + checkpoint: best.ckpt +dataset: + kind: default + img_suffix: .png + pad_out_to_modulo: 8 +device: cuda +out_key: inpainted +refine: false +refiner: + gpu_ids: 0,1 + modulo: ${dataset.pad_out_to_modulo} + n_iters: 15 + lr: 0.002 + min_side: 512 + max_scales: 3 + px_budget: 1800000 diff --git a/lama/outputs/2022-12-06/10-44-21/.hydra/hydra.yaml b/lama/outputs/2022-12-06/10-44-21/.hydra/hydra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..67d78f5d56e3accf1b4bb7ab86a2d5c1f2d632a8 --- /dev/null +++ b/lama/outputs/2022-12-06/10-44-21/.hydra/hydra.yaml @@ -0,0 +1,151 @@ +hydra: + run: + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: 
hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][HYDRA] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + loggers: + logging_example: + level: DEBUG + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + searchpath: [] + callbacks: {} + output_subdir: .hydra + overrides: + hydra: [] + task: + - model.path=/home/vinker/dev/lama/lama/big-lama + - indir=/home/vinker/dev/lama/lama/LaMa_test_images + - outdir=/home/vinker/dev/lama/lama/output + job: + name: predict + override_dirname: indir=/home/vinker/dev/lama/lama/LaMa_test_images,model.path=/home/vinker/dev/lama/lama/big-lama,outdir=/home/vinker/dev/lama/lama/output + id: ??? + num: ??? 
+ config_name: default.yaml + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.1.0 + cwd: /home/vinker/dev/lama/lama + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/vinker/dev/lama/lama/configs/prediction + schema: file + provider: main + - path: '' + schema: structured + provider: schema + choices: + hydra/env: default + hydra/callbacks: null + hydra/job_logging: default + hydra/hydra_logging: default + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/lama/outputs/2022-12-06/10-44-21/.hydra/overrides.yaml b/lama/outputs/2022-12-06/10-44-21/.hydra/overrides.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d71afabfe9f6f0f60ac5a5758b78ba2306ef2815 --- /dev/null +++ b/lama/outputs/2022-12-06/10-44-21/.hydra/overrides.yaml @@ -0,0 +1,3 @@ +- model.path=/home/vinker/dev/lama/lama/big-lama +- indir=/home/vinker/dev/lama/lama/LaMa_test_images +- outdir=/home/vinker/dev/lama/lama/output diff --git a/lama/requirements.txt b/lama/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..d412392cc3e08187e33741551ca416342373f500 --- /dev/null +++ b/lama/requirements.txt @@ -0,0 +1,20 @@ +pyyaml +tqdm +numpy +easydict==1.9.0 +scikit-image==0.17.2 +scikit-learn==0.24.2 +opencv-python +tensorflow +joblib +matplotlib +pandas +albumentations==0.5.2 +hydra-core==1.1.0 +pytorch-lightning==1.2.9 +tabulate +kornia==0.5.0 +webdataset +packaging +scikit-learn==0.24.2 +wldhx.yadisk-direct diff --git a/lama/saicinpainting/__init__.py b/lama/saicinpainting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lama/saicinpainting/evaluation/__init__.py b/lama/saicinpainting/evaluation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae75844982bdec88fb503aa7b97fb4bd0eb64ecb --- /dev/null +++ b/lama/saicinpainting/evaluation/__init__.py @@ -0,0 +1,33 @@ +import logging + +import torch + +from ..evaluation.evaluator import InpaintingEvaluatorOnline, ssim_fid100_f1, lpips_fid100_f1 +from ..evaluation.losses.base_loss import SSIMScore, LPIPSScore, FIDScore + + +def make_evaluator(kind='default', ssim=True, lpips=True, fid=True, integral_kind=None, **kwargs): + logging.info(f'Make evaluator {kind}') + device = "cuda" if torch.cuda.is_available() else "cpu" + metrics = {} + if ssim: + metrics['ssim'] = SSIMScore() + if lpips: + metrics['lpips'] = LPIPSScore() + if fid: + metrics['fid'] = FIDScore().to(device) + + if integral_kind is None: + integral_func = None + elif integral_kind == 'ssim_fid100_f1': + integral_func = ssim_fid100_f1 + elif integral_kind == 'lpips_fid100_f1': + integral_func = lpips_fid100_f1 + else: + raise ValueError(f'Unexpected integral_kind={integral_kind}') + + if kind == 'default': + return InpaintingEvaluatorOnline(scores=metrics, + integral_func=integral_func, + integral_title=integral_kind, + **kwargs) diff --git a/lama/saicinpainting/evaluation/data.py b/lama/saicinpainting/evaluation/data.py new file mode 100644 index 0000000000000000000000000000000000000000..89a4ea4c9577e6131731444f149eec76978ec260 --- /dev/null +++ b/lama/saicinpainting/evaluation/data.py @@ -0,0 +1,168 @@ +import glob +import os + +import cv2 +import PIL.Image as Image +import numpy as np + +from torch.utils.data import Dataset +import 
torch.nn.functional as F + + +def load_image(fname, mode='RGB', return_orig=False): + img = np.array(Image.open(fname).convert(mode)) + if img.ndim == 3: + img = np.transpose(img, (2, 0, 1)) + out_img = img.astype('float32') / 255 + if return_orig: + return out_img, img + else: + return out_img + + +def ceil_modulo(x, mod): + if x % mod == 0: + return x + return (x // mod + 1) * mod + + +def pad_img_to_modulo(img, mod): + channels, height, width = img.shape + out_height = ceil_modulo(height, mod) + out_width = ceil_modulo(width, mod) + return np.pad(img, ((0, 0), (0, out_height - height), (0, out_width - width)), mode='symmetric') + + +def pad_tensor_to_modulo(img, mod): + batch_size, channels, height, width = img.shape + out_height = ceil_modulo(height, mod) + out_width = ceil_modulo(width, mod) + return F.pad(img, pad=(0, out_width - width, 0, out_height - height), mode='reflect') + + +def scale_image(img, factor, interpolation=cv2.INTER_AREA): + if img.shape[0] == 1: + img = img[0] + else: + img = np.transpose(img, (1, 2, 0)) + + img = cv2.resize(img, dsize=None, fx=factor, fy=factor, interpolation=interpolation) + + if img.ndim == 2: + img = img[None, ...] + else: + img = np.transpose(img, (2, 0, 1)) + return img + + +class InpaintingDataset(Dataset): + def __init__(self, datadir, img_suffix='.jpg', pad_out_to_modulo=None, scale_factor=None): + self.datadir = datadir + self.mask_filenames = sorted(list(glob.glob(os.path.join(self.datadir, '**', '*mask*.png'), recursive=True))) + self.img_filenames = [fname.rsplit('_mask', 1)[0] + img_suffix for fname in self.mask_filenames] + self.pad_out_to_modulo = pad_out_to_modulo + self.scale_factor = scale_factor + + def __len__(self): + return len(self.mask_filenames) + + def __getitem__(self, i): + image = load_image(self.img_filenames[i], mode='RGB') + mask = load_image(self.mask_filenames[i], mode='L') + result = dict(image=image, mask=mask[None, ...]) + + if self.scale_factor is not None: + result['image'] = scale_image(result['image'], self.scale_factor) + result['mask'] = scale_image(result['mask'], self.scale_factor, interpolation=cv2.INTER_NEAREST) + + if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1: + result['unpad_to_size'] = result['image'].shape[1:] + result['image'] = pad_img_to_modulo(result['image'], self.pad_out_to_modulo) + result['mask'] = pad_img_to_modulo(result['mask'], self.pad_out_to_modulo) + + return result + +class OurInpaintingDataset(Dataset): + def __init__(self, datadir, img_suffix='.jpg', pad_out_to_modulo=None, scale_factor=None): + self.datadir = datadir + self.mask_filenames = sorted(list(glob.glob(os.path.join(self.datadir, 'mask', '**', '*mask*.png'), recursive=True))) + self.img_filenames = [os.path.join(self.datadir, 'img', os.path.basename(fname.rsplit('-', 1)[0].rsplit('_', 1)[0]) + '.png') for fname in self.mask_filenames] + self.pad_out_to_modulo = pad_out_to_modulo + self.scale_factor = scale_factor + + def __len__(self): + return len(self.mask_filenames) + + def __getitem__(self, i): + result = dict(image=load_image(self.img_filenames[i], mode='RGB'), + mask=load_image(self.mask_filenames[i], mode='L')[None, ...]) + + if self.scale_factor is not None: + result['image'] = scale_image(result['image'], self.scale_factor) + result['mask'] = scale_image(result['mask'], self.scale_factor) + + if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1: + result['image'] = pad_img_to_modulo(result['image'], self.pad_out_to_modulo) + result['mask'] = 
pad_img_to_modulo(result['mask'], self.pad_out_to_modulo) + + return result + +class PrecomputedInpaintingResultsDataset(InpaintingDataset): + def __init__(self, datadir, predictdir, inpainted_suffix='_inpainted.jpg', **kwargs): + super().__init__(datadir, **kwargs) + if not datadir.endswith('/'): + datadir += '/' + self.predictdir = predictdir + self.pred_filenames = [os.path.join(predictdir, os.path.splitext(fname[len(datadir):])[0] + inpainted_suffix) + for fname in self.mask_filenames] + + def __getitem__(self, i): + result = super().__getitem__(i) + result['inpainted'] = load_image(self.pred_filenames[i]) + if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1: + result['inpainted'] = pad_img_to_modulo(result['inpainted'], self.pad_out_to_modulo) + return result + +class OurPrecomputedInpaintingResultsDataset(OurInpaintingDataset): + def __init__(self, datadir, predictdir, inpainted_suffix="png", **kwargs): + super().__init__(datadir, **kwargs) + if not datadir.endswith('/'): + datadir += '/' + self.predictdir = predictdir + self.pred_filenames = [os.path.join(predictdir, os.path.basename(os.path.splitext(fname)[0]) + f'_inpainted.{inpainted_suffix}') + for fname in self.mask_filenames] + # self.pred_filenames = [os.path.join(predictdir, os.path.splitext(fname[len(datadir):])[0] + inpainted_suffix) + # for fname in self.mask_filenames] + + def __getitem__(self, i): + result = super().__getitem__(i) + result['inpainted'] = self.file_loader(self.pred_filenames[i]) + + if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1: + result['inpainted'] = pad_img_to_modulo(result['inpainted'], self.pad_out_to_modulo) + return result + +class InpaintingEvalOnlineDataset(Dataset): + def __init__(self, indir, mask_generator, img_suffix='.jpg', pad_out_to_modulo=None, scale_factor=None, **kwargs): + self.indir = indir + self.mask_generator = mask_generator + self.img_filenames = sorted(list(glob.glob(os.path.join(self.indir, '**', f'*{img_suffix}' ), recursive=True))) + self.pad_out_to_modulo = pad_out_to_modulo + self.scale_factor = scale_factor + + def __len__(self): + return len(self.img_filenames) + + def __getitem__(self, i): + img, raw_image = load_image(self.img_filenames[i], mode='RGB', return_orig=True) + mask = self.mask_generator(img, raw_image=raw_image) + result = dict(image=img, mask=mask) + + if self.scale_factor is not None: + result['image'] = scale_image(result['image'], self.scale_factor) + result['mask'] = scale_image(result['mask'], self.scale_factor, interpolation=cv2.INTER_NEAREST) + + if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1: + result['image'] = pad_img_to_modulo(result['image'], self.pad_out_to_modulo) + result['mask'] = pad_img_to_modulo(result['mask'], self.pad_out_to_modulo) + return result \ No newline at end of file diff --git a/lama/saicinpainting/evaluation/evaluator.py b/lama/saicinpainting/evaluation/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..1feed832921d72f21994bc4af8f882c401a642e6 --- /dev/null +++ b/lama/saicinpainting/evaluation/evaluator.py @@ -0,0 +1,220 @@ +import logging +import math +from typing import Dict + +import numpy as np +import torch +import torch.nn as nn +import tqdm +from torch.utils.data import DataLoader + +from ..evaluation.utils import move_to_device + +LOGGER = logging.getLogger(__name__) + + +class InpaintingEvaluator(): + def __init__(self, dataset, scores, area_grouping=True, bins=10, batch_size=32, device='cuda', + integral_func=None, 
integral_title=None, clamp_image_range=None): + """ + :param dataset: torch.utils.data.Dataset which contains images and masks + :param scores: dict {score_name: EvaluatorScore object} + :param area_grouping: in addition to the overall scores, allows to compute score for the groups of samples + which are defined by share of area occluded by mask + :param bins: number of groups, partition is generated by np.linspace(0., 1., bins + 1) + :param batch_size: batch_size for the dataloader + :param device: device to use + """ + self.scores = scores + self.dataset = dataset + + self.area_grouping = area_grouping + self.bins = bins + + self.device = torch.device(device) + + self.dataloader = DataLoader(self.dataset, shuffle=False, batch_size=batch_size) + + self.integral_func = integral_func + self.integral_title = integral_title + self.clamp_image_range = clamp_image_range + + def _get_bin_edges(self): + bin_edges = np.linspace(0, 1, self.bins + 1) + + num_digits = max(0, math.ceil(math.log10(self.bins)) - 1) + interval_names = [] + for idx_bin in range(self.bins): + start_percent, end_percent = round(100 * bin_edges[idx_bin], num_digits), \ + round(100 * bin_edges[idx_bin + 1], num_digits) + start_percent = '{:.{n}f}'.format(start_percent, n=num_digits) + end_percent = '{:.{n}f}'.format(end_percent, n=num_digits) + interval_names.append("{0}-{1}%".format(start_percent, end_percent)) + + groups = [] + for batch in self.dataloader: + mask = batch['mask'] + batch_size = mask.shape[0] + area = mask.to(self.device).reshape(batch_size, -1).mean(dim=-1) + bin_indices = np.searchsorted(bin_edges, area.detach().cpu().numpy(), side='right') - 1 + # corner case: when area is equal to 1, bin_indices should return bins - 1, not bins for that element + bin_indices[bin_indices == self.bins] = self.bins - 1 + groups.append(bin_indices) + groups = np.hstack(groups) + + return groups, interval_names + + def evaluate(self, model=None): + """ + :param model: callable with signature (image_batch, mask_batch); should return inpainted_batch + :return: dict with (score_name, group_type) as keys, where group_type can be either 'overall' or + name of the particular group arranged by area of mask (e.g. '10-20%') + and score statistics for the group as values. 
+ """ + results = dict() + if self.area_grouping: + groups, interval_names = self._get_bin_edges() + else: + groups = None + + for score_name, score in tqdm.auto.tqdm(self.scores.items(), desc='scores'): + score.to(self.device) + with torch.no_grad(): + score.reset() + for batch in tqdm.auto.tqdm(self.dataloader, desc=score_name, leave=False): + batch = move_to_device(batch, self.device) + image_batch, mask_batch = batch['image'], batch['mask'] + if self.clamp_image_range is not None: + image_batch = torch.clamp(image_batch, + min=self.clamp_image_range[0], + max=self.clamp_image_range[1]) + if model is None: + assert 'inpainted' in batch, \ + 'Model is None, so we expected precomputed inpainting results at key "inpainted"' + inpainted_batch = batch['inpainted'] + else: + inpainted_batch = model(image_batch, mask_batch) + score(inpainted_batch, image_batch, mask_batch) + total_results, group_results = score.get_value(groups=groups) + + results[(score_name, 'total')] = total_results + if groups is not None: + for group_index, group_values in group_results.items(): + group_name = interval_names[group_index] + results[(score_name, group_name)] = group_values + + if self.integral_func is not None: + results[(self.integral_title, 'total')] = dict(mean=self.integral_func(results)) + + return results + + +def ssim_fid100_f1(metrics, fid_scale=100): + ssim = metrics[('ssim', 'total')]['mean'] + fid = metrics[('fid', 'total')]['mean'] + fid_rel = max(0, fid_scale - fid) / fid_scale + f1 = 2 * ssim * fid_rel / (ssim + fid_rel + 1e-3) + return f1 + + +def lpips_fid100_f1(metrics, fid_scale=100): + neg_lpips = 1 - metrics[('lpips', 'total')]['mean'] # invert, so bigger is better + fid = metrics[('fid', 'total')]['mean'] + fid_rel = max(0, fid_scale - fid) / fid_scale + f1 = 2 * neg_lpips * fid_rel / (neg_lpips + fid_rel + 1e-3) + return f1 + + + +class InpaintingEvaluatorOnline(nn.Module): + def __init__(self, scores, bins=10, image_key='image', inpainted_key='inpainted', + integral_func=None, integral_title=None, clamp_image_range=None): + """ + :param scores: dict {score_name: EvaluatorScore object} + :param bins: number of groups, partition is generated by np.linspace(0., 1., bins + 1) + :param device: device to use + """ + super().__init__() + LOGGER.info(f'{type(self)} init called') + self.scores = nn.ModuleDict(scores) + self.image_key = image_key + self.inpainted_key = inpainted_key + self.bins_num = bins + self.bin_edges = np.linspace(0, 1, self.bins_num + 1) + + num_digits = max(0, math.ceil(math.log10(self.bins_num)) - 1) + self.interval_names = [] + for idx_bin in range(self.bins_num): + start_percent, end_percent = round(100 * self.bin_edges[idx_bin], num_digits), \ + round(100 * self.bin_edges[idx_bin + 1], num_digits) + start_percent = '{:.{n}f}'.format(start_percent, n=num_digits) + end_percent = '{:.{n}f}'.format(end_percent, n=num_digits) + self.interval_names.append("{0}-{1}%".format(start_percent, end_percent)) + + self.groups = [] + + self.integral_func = integral_func + self.integral_title = integral_title + self.clamp_image_range = clamp_image_range + + LOGGER.info(f'{type(self)} init done') + + def _get_bins(self, mask_batch): + batch_size = mask_batch.shape[0] + area = mask_batch.view(batch_size, -1).mean(dim=-1).detach().cpu().numpy() + bin_indices = np.clip(np.searchsorted(self.bin_edges, area) - 1, 0, self.bins_num - 1) + return bin_indices + + def forward(self, batch: Dict[str, torch.Tensor]): + """ + Calculate and accumulate metrics for batch. 
To finalize evaluation and obtain final metrics, call evaluation_end + :param batch: batch dict with mandatory fields mask, image, inpainted (can be overriden by self.inpainted_key) + """ + result = {} + with torch.no_grad(): + image_batch, mask_batch, inpainted_batch = batch[self.image_key], batch['mask'], batch[self.inpainted_key] + if self.clamp_image_range is not None: + image_batch = torch.clamp(image_batch, + min=self.clamp_image_range[0], + max=self.clamp_image_range[1]) + self.groups.extend(self._get_bins(mask_batch)) + + for score_name, score in self.scores.items(): + result[score_name] = score(inpainted_batch, image_batch, mask_batch) + return result + + def process_batch(self, batch: Dict[str, torch.Tensor]): + return self(batch) + + def evaluation_end(self, states=None): + """:return: dict with (score_name, group_type) as keys, where group_type can be either 'overall' or + name of the particular group arranged by area of mask (e.g. '10-20%') + and score statistics for the group as values. + """ + LOGGER.info(f'{type(self)}: evaluation_end called') + + self.groups = np.array(self.groups) + + results = {} + for score_name, score in self.scores.items(): + LOGGER.info(f'Getting value of {score_name}') + cur_states = [s[score_name] for s in states] if states is not None else None + total_results, group_results = score.get_value(groups=self.groups, states=cur_states) + LOGGER.info(f'Getting value of {score_name} done') + results[(score_name, 'total')] = total_results + + for group_index, group_values in group_results.items(): + group_name = self.interval_names[group_index] + results[(score_name, group_name)] = group_values + + if self.integral_func is not None: + results[(self.integral_title, 'total')] = dict(mean=self.integral_func(results)) + + LOGGER.info(f'{type(self)}: reset scores') + self.groups = [] + for sc in self.scores.values(): + sc.reset() + LOGGER.info(f'{type(self)}: reset scores done') + + LOGGER.info(f'{type(self)}: evaluation_end done') + return results diff --git a/lama/saicinpainting/evaluation/losses/__init__.py b/lama/saicinpainting/evaluation/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lama/saicinpainting/evaluation/losses/base_loss.py b/lama/saicinpainting/evaluation/losses/base_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..7ba022835d90345e292c175f7bc2d8926cd52dce --- /dev/null +++ b/lama/saicinpainting/evaluation/losses/base_loss.py @@ -0,0 +1,528 @@ +import logging +from abc import abstractmethod, ABC + +import numpy as np +import sklearn +import sklearn.svm +import torch +import torch.nn as nn +import torch.nn.functional as F +from joblib import Parallel, delayed +from scipy import linalg + +from ....models.ade20k import SegmentationModule, NUM_CLASS, segm_options +from .fid.inception import InceptionV3 +from .lpips import PerceptualLoss +from .ssim import SSIM + +LOGGER = logging.getLogger(__name__) + + +def get_groupings(groups): + """ + :param groups: group numbers for respective elements + :return: dict of kind {group_idx: indices of the corresponding group elements} + """ + label_groups, count_groups = np.unique(groups, return_counts=True) + + indices = np.argsort(groups) + + grouping = dict() + cur_start = 0 + for label, count in zip(label_groups, count_groups): + cur_end = cur_start + count + cur_indices = indices[cur_start:cur_end] + grouping[label] = cur_indices + cur_start = cur_end + return grouping + + +class 
EvaluatorScore(nn.Module): + @abstractmethod + def forward(self, pred_batch, target_batch, mask): + pass + + @abstractmethod + def get_value(self, groups=None, states=None): + pass + + @abstractmethod + def reset(self): + pass + + +class PairwiseScore(EvaluatorScore, ABC): + def __init__(self): + super().__init__() + self.individual_values = None + + def get_value(self, groups=None, states=None): + """ + :param groups: + :return: + total_results: dict of kind {'mean': score mean, 'std': score std} + group_results: None, if groups is None; + else dict {group_idx: {'mean': score mean among group, 'std': score std among group}} + """ + individual_values = torch.cat(states, dim=-1).reshape(-1).cpu().numpy() if states is not None \ + else self.individual_values + + total_results = { + 'mean': individual_values.mean(), + 'std': individual_values.std() + } + + if groups is None: + return total_results, None + + group_results = dict() + grouping = get_groupings(groups) + for label, index in grouping.items(): + group_scores = individual_values[index] + group_results[label] = { + 'mean': group_scores.mean(), + 'std': group_scores.std() + } + return total_results, group_results + + def reset(self): + self.individual_values = [] + + +class SSIMScore(PairwiseScore): + def __init__(self, window_size=11): + super().__init__() + self.score = SSIM(window_size=window_size, size_average=False).eval() + self.reset() + + def forward(self, pred_batch, target_batch, mask=None): + batch_values = self.score(pred_batch, target_batch) + self.individual_values = np.hstack([ + self.individual_values, batch_values.detach().cpu().numpy() + ]) + return batch_values + + +class LPIPSScore(PairwiseScore): + def __init__(self, model='net-lin', net='vgg', model_path=None, use_gpu=True): + super().__init__() + self.score = PerceptualLoss(model=model, net=net, model_path=model_path, + use_gpu=use_gpu, spatial=False).eval() + self.reset() + + def forward(self, pred_batch, target_batch, mask=None): + batch_values = self.score(pred_batch, target_batch).flatten() + self.individual_values = np.hstack([ + self.individual_values, batch_values.detach().cpu().numpy() + ]) + return batch_values + + +def fid_calculate_activation_statistics(act): + mu = np.mean(act, axis=0) + sigma = np.cov(act, rowvar=False) + return mu, sigma + + +def calculate_frechet_distance(activations_pred, activations_target, eps=1e-6): + mu1, sigma1 = fid_calculate_activation_statistics(activations_pred) + mu2, sigma2 = fid_calculate_activation_statistics(activations_target) + + diff = mu1 - mu2 + + # Product might be almost singular + covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) + if not np.isfinite(covmean).all(): + msg = ('fid calculation produces singular product; ' + 'adding %s to diagonal of cov estimates') % eps + LOGGER.warning(msg) + offset = np.eye(sigma1.shape[0]) * eps + covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) + + # Numerical error might give slight imaginary component + if np.iscomplexobj(covmean): + # if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): + if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-2): + m = np.max(np.abs(covmean.imag)) + raise ValueError('Imaginary component {}'.format(m)) + covmean = covmean.real + + tr_covmean = np.trace(covmean) + + return (diff.dot(diff) + np.trace(sigma1) + + np.trace(sigma2) - 2 * tr_covmean) + + +class FIDScore(EvaluatorScore): + def __init__(self, dims=2048, eps=1e-6): + LOGGER.info("FIDscore init called") + super().__init__() + if getattr(FIDScore, 
'_MODEL', None) is None: + block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] + FIDScore._MODEL = InceptionV3([block_idx]).eval() + self.model = FIDScore._MODEL + self.eps = eps + self.reset() + LOGGER.info("FIDscore init done") + + def forward(self, pred_batch, target_batch, mask=None): + activations_pred = self._get_activations(pred_batch) + activations_target = self._get_activations(target_batch) + + self.activations_pred.append(activations_pred.detach().cpu()) + self.activations_target.append(activations_target.detach().cpu()) + + return activations_pred, activations_target + + def get_value(self, groups=None, states=None): + LOGGER.info("FIDscore get_value called") + activations_pred, activations_target = zip(*states) if states is not None \ + else (self.activations_pred, self.activations_target) + activations_pred = torch.cat(activations_pred).cpu().numpy() + activations_target = torch.cat(activations_target).cpu().numpy() + + total_distance = calculate_frechet_distance(activations_pred, activations_target, eps=self.eps) + total_results = dict(mean=total_distance) + + if groups is None: + group_results = None + else: + group_results = dict() + grouping = get_groupings(groups) + for label, index in grouping.items(): + if len(index) > 1: + group_distance = calculate_frechet_distance(activations_pred[index], activations_target[index], + eps=self.eps) + group_results[label] = dict(mean=group_distance) + + else: + group_results[label] = dict(mean=float('nan')) + + self.reset() + + LOGGER.info("FIDscore get_value done") + + return total_results, group_results + + def reset(self): + self.activations_pred = [] + self.activations_target = [] + + def _get_activations(self, batch): + activations = self.model(batch)[0] + if activations.shape[2] != 1 or activations.shape[3] != 1: + assert False, \ + 'We should not have got here, because Inception always scales inputs to 299x299' + # activations = F.adaptive_avg_pool2d(activations, output_size=(1, 1)) + activations = activations.squeeze(-1).squeeze(-1) + return activations + + +class SegmentationAwareScore(EvaluatorScore): + def __init__(self, weights_path): + super().__init__() + self.segm_network = SegmentationModule(weights_path=weights_path, use_default_normalization=True).eval() + self.target_class_freq_by_image_total = [] + self.target_class_freq_by_image_mask = [] + self.pred_class_freq_by_image_mask = [] + + def forward(self, pred_batch, target_batch, mask): + pred_segm_flat = self.segm_network.predict(pred_batch)[0].view(pred_batch.shape[0], -1).long().detach().cpu().numpy() + target_segm_flat = self.segm_network.predict(target_batch)[0].view(pred_batch.shape[0], -1).long().detach().cpu().numpy() + mask_flat = (mask.view(mask.shape[0], -1) > 0.5).detach().cpu().numpy() + + batch_target_class_freq_total = [] + batch_target_class_freq_mask = [] + batch_pred_class_freq_mask = [] + + for cur_pred_segm, cur_target_segm, cur_mask in zip(pred_segm_flat, target_segm_flat, mask_flat): + cur_target_class_freq_total = np.bincount(cur_target_segm, minlength=NUM_CLASS)[None, ...] + cur_target_class_freq_mask = np.bincount(cur_target_segm[cur_mask], minlength=NUM_CLASS)[None, ...] + cur_pred_class_freq_mask = np.bincount(cur_pred_segm[cur_mask], minlength=NUM_CLASS)[None, ...] 
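+            # Per-sample class histograms over the NUM_CLASS segmentation labels:
+            # target classes over the whole image, target classes under the mask,
+            # and predicted classes under the mask. These are accumulated and later
+            # used, e.g., to distribute per-image scores across semantic classes.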
+ + self.target_class_freq_by_image_total.append(cur_target_class_freq_total) + self.target_class_freq_by_image_mask.append(cur_target_class_freq_mask) + self.pred_class_freq_by_image_mask.append(cur_pred_class_freq_mask) + + batch_target_class_freq_total.append(cur_target_class_freq_total) + batch_target_class_freq_mask.append(cur_target_class_freq_mask) + batch_pred_class_freq_mask.append(cur_pred_class_freq_mask) + + batch_target_class_freq_total = np.concatenate(batch_target_class_freq_total, axis=0) + batch_target_class_freq_mask = np.concatenate(batch_target_class_freq_mask, axis=0) + batch_pred_class_freq_mask = np.concatenate(batch_pred_class_freq_mask, axis=0) + return batch_target_class_freq_total, batch_target_class_freq_mask, batch_pred_class_freq_mask + + def reset(self): + super().reset() + self.target_class_freq_by_image_total = [] + self.target_class_freq_by_image_mask = [] + self.pred_class_freq_by_image_mask = [] + + +def distribute_values_to_classes(target_class_freq_by_image_mask, values, idx2name): + assert target_class_freq_by_image_mask.ndim == 2 and target_class_freq_by_image_mask.shape[0] == values.shape[0] + total_class_freq = target_class_freq_by_image_mask.sum(0) + distr_values = (target_class_freq_by_image_mask * values[..., None]).sum(0) + result = distr_values / (total_class_freq + 1e-3) + return {idx2name[i]: val for i, val in enumerate(result) if total_class_freq[i] > 0} + + +def get_segmentation_idx2name(): + return {i - 1: name for i, name in segm_options['classes'].set_index('Idx', drop=True)['Name'].to_dict().items()} + + +class SegmentationAwarePairwiseScore(SegmentationAwareScore): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.individual_values = [] + self.segm_idx2name = get_segmentation_idx2name() + + def forward(self, pred_batch, target_batch, mask): + cur_class_stats = super().forward(pred_batch, target_batch, mask) + score_values = self.calc_score(pred_batch, target_batch, mask) + self.individual_values.append(score_values) + return cur_class_stats + (score_values,) + + @abstractmethod + def calc_score(self, pred_batch, target_batch, mask): + raise NotImplementedError() + + def get_value(self, groups=None, states=None): + """ + :param groups: + :return: + total_results: dict of kind {'mean': score mean, 'std': score std} + group_results: None, if groups is None; + else dict {group_idx: {'mean': score mean among group, 'std': score std among group}} + """ + if states is not None: + (target_class_freq_by_image_total, + target_class_freq_by_image_mask, + pred_class_freq_by_image_mask, + individual_values) = states + else: + target_class_freq_by_image_total = self.target_class_freq_by_image_total + target_class_freq_by_image_mask = self.target_class_freq_by_image_mask + pred_class_freq_by_image_mask = self.pred_class_freq_by_image_mask + individual_values = self.individual_values + + target_class_freq_by_image_total = np.concatenate(target_class_freq_by_image_total, axis=0) + target_class_freq_by_image_mask = np.concatenate(target_class_freq_by_image_mask, axis=0) + pred_class_freq_by_image_mask = np.concatenate(pred_class_freq_by_image_mask, axis=0) + individual_values = np.concatenate(individual_values, axis=0) + + total_results = { + 'mean': individual_values.mean(), + 'std': individual_values.std(), + **distribute_values_to_classes(target_class_freq_by_image_mask, individual_values, self.segm_idx2name) + } + + if groups is None: + return total_results, None + + group_results = dict() + grouping = 
get_groupings(groups) + for label, index in grouping.items(): + group_class_freq = target_class_freq_by_image_mask[index] + group_scores = individual_values[index] + group_results[label] = { + 'mean': group_scores.mean(), + 'std': group_scores.std(), + ** distribute_values_to_classes(group_class_freq, group_scores, self.segm_idx2name) + } + return total_results, group_results + + def reset(self): + super().reset() + self.individual_values = [] + + +class SegmentationClassStats(SegmentationAwarePairwiseScore): + def calc_score(self, pred_batch, target_batch, mask): + return 0 + + def get_value(self, groups=None, states=None): + """ + :param groups: + :return: + total_results: dict of kind {'mean': score mean, 'std': score std} + group_results: None, if groups is None; + else dict {group_idx: {'mean': score mean among group, 'std': score std among group}} + """ + if states is not None: + (target_class_freq_by_image_total, + target_class_freq_by_image_mask, + pred_class_freq_by_image_mask, + _) = states + else: + target_class_freq_by_image_total = self.target_class_freq_by_image_total + target_class_freq_by_image_mask = self.target_class_freq_by_image_mask + pred_class_freq_by_image_mask = self.pred_class_freq_by_image_mask + + target_class_freq_by_image_total = np.concatenate(target_class_freq_by_image_total, axis=0) + target_class_freq_by_image_mask = np.concatenate(target_class_freq_by_image_mask, axis=0) + pred_class_freq_by_image_mask = np.concatenate(pred_class_freq_by_image_mask, axis=0) + + target_class_freq_by_image_total_marginal = target_class_freq_by_image_total.sum(0).astype('float32') + target_class_freq_by_image_total_marginal /= target_class_freq_by_image_total_marginal.sum() + + target_class_freq_by_image_mask_marginal = target_class_freq_by_image_mask.sum(0).astype('float32') + target_class_freq_by_image_mask_marginal /= target_class_freq_by_image_mask_marginal.sum() + + pred_class_freq_diff = (pred_class_freq_by_image_mask - target_class_freq_by_image_mask).sum(0) / (target_class_freq_by_image_mask.sum(0) + 1e-3) + + total_results = dict() + total_results.update({f'total_freq/{self.segm_idx2name[i]}': v + for i, v in enumerate(target_class_freq_by_image_total_marginal) + if v > 0}) + total_results.update({f'mask_freq/{self.segm_idx2name[i]}': v + for i, v in enumerate(target_class_freq_by_image_mask_marginal) + if v > 0}) + total_results.update({f'mask_freq_diff/{self.segm_idx2name[i]}': v + for i, v in enumerate(pred_class_freq_diff) + if target_class_freq_by_image_total_marginal[i] > 0}) + + if groups is None: + return total_results, None + + group_results = dict() + grouping = get_groupings(groups) + for label, index in grouping.items(): + group_target_class_freq_by_image_total = target_class_freq_by_image_total[index] + group_target_class_freq_by_image_mask = target_class_freq_by_image_mask[index] + group_pred_class_freq_by_image_mask = pred_class_freq_by_image_mask[index] + + group_target_class_freq_by_image_total_marginal = group_target_class_freq_by_image_total.sum(0).astype('float32') + group_target_class_freq_by_image_total_marginal /= group_target_class_freq_by_image_total_marginal.sum() + + group_target_class_freq_by_image_mask_marginal = group_target_class_freq_by_image_mask.sum(0).astype('float32') + group_target_class_freq_by_image_mask_marginal /= group_target_class_freq_by_image_mask_marginal.sum() + + group_pred_class_freq_diff = (group_pred_class_freq_by_image_mask - group_target_class_freq_by_image_mask).sum(0) / ( + 
group_target_class_freq_by_image_mask.sum(0) + 1e-3) + + cur_group_results = dict() + cur_group_results.update({f'total_freq/{self.segm_idx2name[i]}': v + for i, v in enumerate(group_target_class_freq_by_image_total_marginal) + if v > 0}) + cur_group_results.update({f'mask_freq/{self.segm_idx2name[i]}': v + for i, v in enumerate(group_target_class_freq_by_image_mask_marginal) + if v > 0}) + cur_group_results.update({f'mask_freq_diff/{self.segm_idx2name[i]}': v + for i, v in enumerate(group_pred_class_freq_diff) + if group_target_class_freq_by_image_total_marginal[i] > 0}) + + group_results[label] = cur_group_results + return total_results, group_results + + +class SegmentationAwareSSIM(SegmentationAwarePairwiseScore): + def __init__(self, *args, window_size=11, **kwargs): + super().__init__(*args, **kwargs) + self.score_impl = SSIM(window_size=window_size, size_average=False).eval() + + def calc_score(self, pred_batch, target_batch, mask): + return self.score_impl(pred_batch, target_batch).detach().cpu().numpy() + + +class SegmentationAwareLPIPS(SegmentationAwarePairwiseScore): + def __init__(self, *args, model='net-lin', net='vgg', model_path=None, use_gpu=True, **kwargs): + super().__init__(*args, **kwargs) + self.score_impl = PerceptualLoss(model=model, net=net, model_path=model_path, + use_gpu=use_gpu, spatial=False).eval() + + def calc_score(self, pred_batch, target_batch, mask): + return self.score_impl(pred_batch, target_batch).flatten().detach().cpu().numpy() + + +def calculade_fid_no_img(img_i, activations_pred, activations_target, eps=1e-6): + activations_pred = activations_pred.copy() + activations_pred[img_i] = activations_target[img_i] + return calculate_frechet_distance(activations_pred, activations_target, eps=eps) + + +class SegmentationAwareFID(SegmentationAwarePairwiseScore): + def __init__(self, *args, dims=2048, eps=1e-6, n_jobs=-1, **kwargs): + super().__init__(*args, **kwargs) + if getattr(FIDScore, '_MODEL', None) is None: + block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] + FIDScore._MODEL = InceptionV3([block_idx]).eval() + self.model = FIDScore._MODEL + self.eps = eps + self.n_jobs = n_jobs + + def calc_score(self, pred_batch, target_batch, mask): + activations_pred = self._get_activations(pred_batch) + activations_target = self._get_activations(target_batch) + return activations_pred, activations_target + + def get_value(self, groups=None, states=None): + """ + :param groups: + :return: + total_results: dict of kind {'mean': score mean, 'std': score std} + group_results: None, if groups is None; + else dict {group_idx: {'mean': score mean among group, 'std': score std among group}} + """ + if states is not None: + (target_class_freq_by_image_total, + target_class_freq_by_image_mask, + pred_class_freq_by_image_mask, + activation_pairs) = states + else: + target_class_freq_by_image_total = self.target_class_freq_by_image_total + target_class_freq_by_image_mask = self.target_class_freq_by_image_mask + pred_class_freq_by_image_mask = self.pred_class_freq_by_image_mask + activation_pairs = self.individual_values + + target_class_freq_by_image_total = np.concatenate(target_class_freq_by_image_total, axis=0) + target_class_freq_by_image_mask = np.concatenate(target_class_freq_by_image_mask, axis=0) + pred_class_freq_by_image_mask = np.concatenate(pred_class_freq_by_image_mask, axis=0) + activations_pred, activations_target = zip(*activation_pairs) + activations_pred = np.concatenate(activations_pred, axis=0) + activations_target = np.concatenate(activations_target, 
axis=0) + + total_results = { + 'mean': calculate_frechet_distance(activations_pred, activations_target, eps=self.eps), + 'std': 0, + **self.distribute_fid_to_classes(target_class_freq_by_image_mask, activations_pred, activations_target) + } + + if groups is None: + return total_results, None + + group_results = dict() + grouping = get_groupings(groups) + for label, index in grouping.items(): + if len(index) > 1: + group_activations_pred = activations_pred[index] + group_activations_target = activations_target[index] + group_class_freq = target_class_freq_by_image_mask[index] + group_results[label] = { + 'mean': calculate_frechet_distance(group_activations_pred, group_activations_target, eps=self.eps), + 'std': 0, + **self.distribute_fid_to_classes(group_class_freq, + group_activations_pred, + group_activations_target) + } + else: + group_results[label] = dict(mean=float('nan'), std=0) + return total_results, group_results + + def distribute_fid_to_classes(self, class_freq, activations_pred, activations_target): + real_fid = calculate_frechet_distance(activations_pred, activations_target, eps=self.eps) + + fid_no_images = Parallel(n_jobs=self.n_jobs)( + delayed(calculade_fid_no_img)(img_i, activations_pred, activations_target, eps=self.eps) + for img_i in range(activations_pred.shape[0]) + ) + errors = real_fid - fid_no_images + return distribute_values_to_classes(class_freq, errors, self.segm_idx2name) + + def _get_activations(self, batch): + activations = self.model(batch)[0] + if activations.shape[2] != 1 or activations.shape[3] != 1: + activations = F.adaptive_avg_pool2d(activations, output_size=(1, 1)) + activations = activations.squeeze(-1).squeeze(-1).detach().cpu().numpy() + return activations diff --git a/lama/saicinpainting/evaluation/losses/fid/__init__.py b/lama/saicinpainting/evaluation/losses/fid/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lama/saicinpainting/evaluation/losses/fid/fid_score.py b/lama/saicinpainting/evaluation/losses/fid/fid_score.py new file mode 100644 index 0000000000000000000000000000000000000000..6ca8e602c21bb6a624d646da3f6479aea033b0ac --- /dev/null +++ b/lama/saicinpainting/evaluation/losses/fid/fid_score.py @@ -0,0 +1,328 @@ +#!/usr/bin/env python3 +"""Calculates the Frechet Inception Distance (FID) to evalulate GANs + +The FID metric calculates the distance between two distributions of images. +Typically, we have summary statistics (mean & covariance matrix) of one +of these distributions, while the 2nd distribution is given by a GAN. + +When run as a stand-alone program, it compares the distribution of +images that are stored as PNG/JPEG at a specified location with a +distribution given by summary statistics (in pickle format). + +The FID is calculated by assuming that X_1 and X_2 are the activations of +the pool_3 layer of the inception net for generated samples and real world +samples respectively. + +See --help to see further details. + +Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead +of Tensorflow + +Copyright 2018 Institute of Bioinformatics, JKU Linz + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import os +import pathlib +from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser + +import numpy as np +import torch +# from scipy.misc import imread +from imageio import imread +from PIL import Image, JpegImagePlugin +from scipy import linalg +from torch.nn.functional import adaptive_avg_pool2d +from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor + +try: + from tqdm import tqdm +except ImportError: + # If not tqdm is not available, provide a mock version of it + def tqdm(x): return x + +try: + from .inception import InceptionV3 +except ModuleNotFoundError: + from inception import InceptionV3 + +parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) +parser.add_argument('path', type=str, nargs=2, + help=('Path to the generated images or ' + 'to .npz statistic files')) +parser.add_argument('--batch-size', type=int, default=50, + help='Batch size to use') +parser.add_argument('--dims', type=int, default=2048, + choices=list(InceptionV3.BLOCK_INDEX_BY_DIM), + help=('Dimensionality of Inception features to use. ' + 'By default, uses pool3 features')) +parser.add_argument('-c', '--gpu', default='', type=str, + help='GPU to use (leave blank for CPU only)') +parser.add_argument('--resize', default=256) + +transform = Compose([Resize(256), CenterCrop(256), ToTensor()]) + + +def get_activations(files, model, batch_size=50, dims=2048, + cuda=False, verbose=False, keep_size=False): + """Calculates the activations of the pool_3 layer for all images. + + Params: + -- files : List of image files paths + -- model : Instance of inception model + -- batch_size : Batch size of images for the model to process at once. + Make sure that the number of samples is a multiple of + the batch size, otherwise some samples are ignored. This + behavior is retained to match the original FID score + implementation. + -- dims : Dimensionality of features returned by Inception + -- cuda : If set to True, use GPU + -- verbose : If set to True and parameter out_step is given, the number + of calculated batches is reported. + Returns: + -- A numpy array of dimension (num images, dims) that contains the + activations of the given tensor when feeding inception with the + query tensor. + """ + model.eval() + + if len(files) % batch_size != 0: + print(('Warning: number of images is not a multiple of the ' + 'batch size. Some samples are going to be ignored.')) + if batch_size > len(files): + print(('Warning: batch size is bigger than the data size. 
' + 'Setting batch size to data size')) + batch_size = len(files) + + n_batches = len(files) // batch_size + n_used_imgs = n_batches * batch_size + + pred_arr = np.empty((n_used_imgs, dims)) + + for i in tqdm(range(n_batches)): + if verbose: + print('\rPropagating batch %d/%d' % (i + 1, n_batches), + end='', flush=True) + start = i * batch_size + end = start + batch_size + + # # Official code goes below + # images = np.array([imread(str(f)).astype(np.float32) + # for f in files[start:end]]) + + # # Reshape to (n_images, 3, height, width) + # images = images.transpose((0, 3, 1, 2)) + # images /= 255 + # batch = torch.from_numpy(images).type(torch.FloatTensor) + # # + + t = transform if not keep_size else ToTensor() + + if isinstance(files[0], pathlib.PosixPath): + images = [t(Image.open(str(f))) for f in files[start:end]] + + elif isinstance(files[0], Image.Image): + images = [t(f) for f in files[start:end]] + + else: + raise ValueError(f"Unknown data type for image: {type(files[0])}") + + batch = torch.stack(images) + + if cuda: + batch = batch.cuda() + + pred = model(batch)[0] + + # If model output is not scalar, apply global spatial average pooling. + # This happens if you choose a dimensionality not equal 2048. + if pred.shape[2] != 1 or pred.shape[3] != 1: + pred = adaptive_avg_pool2d(pred, output_size=(1, 1)) + + pred_arr[start:end] = pred.cpu().data.numpy().reshape(batch_size, -1) + + if verbose: + print(' done') + + return pred_arr + + +def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): + """Numpy implementation of the Frechet Distance. + The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) + and X_2 ~ N(mu_2, C_2) is + d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). + + Stable version by Dougal J. Sutherland. + + Params: + -- mu1 : Numpy array containing the activations of a layer of the + inception net (like returned by the function 'get_predictions') + for generated samples. + -- mu2 : The sample mean over activations, precalculated on an + representative data set. + -- sigma1: The covariance matrix over activations for generated samples. + -- sigma2: The covariance matrix over activations, precalculated on an + representative data set. + + Returns: + -- : The Frechet Distance. 
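+    Note: if the product of the covariance matrices is numerically singular,
+    a small eps is added to their diagonals before the matrix square root is taken.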
+ """ + + mu1 = np.atleast_1d(mu1) + mu2 = np.atleast_1d(mu2) + + sigma1 = np.atleast_2d(sigma1) + sigma2 = np.atleast_2d(sigma2) + + assert mu1.shape == mu2.shape, \ + 'Training and test mean vectors have different lengths' + assert sigma1.shape == sigma2.shape, \ + 'Training and test covariances have different dimensions' + + diff = mu1 - mu2 + + # Product might be almost singular + covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) + if not np.isfinite(covmean).all(): + msg = ('fid calculation produces singular product; ' + 'adding %s to diagonal of cov estimates') % eps + print(msg) + offset = np.eye(sigma1.shape[0]) * eps + covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) + + # Numerical error might give slight imaginary component + if np.iscomplexobj(covmean): + # if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): + if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-2): + m = np.max(np.abs(covmean.imag)) + raise ValueError('Imaginary component {}'.format(m)) + covmean = covmean.real + + tr_covmean = np.trace(covmean) + + return (diff.dot(diff) + np.trace(sigma1) + + np.trace(sigma2) - 2 * tr_covmean) + + +def calculate_activation_statistics(files, model, batch_size=50, + dims=2048, cuda=False, verbose=False, keep_size=False): + """Calculation of the statistics used by the FID. + Params: + -- files : List of image files paths + -- model : Instance of inception model + -- batch_size : The images numpy array is split into batches with + batch size batch_size. A reasonable batch size + depends on the hardware. + -- dims : Dimensionality of features returned by Inception + -- cuda : If set to True, use GPU + -- verbose : If set to True and parameter out_step is given, the + number of calculated batches is reported. + Returns: + -- mu : The mean over samples of the activations of the pool_3 layer of + the inception model. + -- sigma : The covariance matrix of the activations of the pool_3 layer of + the inception model. 
+ """ + act = get_activations(files, model, batch_size, dims, cuda, verbose, keep_size=keep_size) + mu = np.mean(act, axis=0) + sigma = np.cov(act, rowvar=False) + return mu, sigma + + +def _compute_statistics_of_path(path, model, batch_size, dims, cuda): + if path.endswith('.npz'): + f = np.load(path) + m, s = f['mu'][:], f['sigma'][:] + f.close() + else: + path = pathlib.Path(path) + files = list(path.glob('*.jpg')) + list(path.glob('*.png')) + m, s = calculate_activation_statistics(files, model, batch_size, + dims, cuda) + + return m, s + + +def _compute_statistics_of_images(images, model, batch_size, dims, cuda, keep_size=False): + if isinstance(images, list): # exact paths to files are provided + m, s = calculate_activation_statistics(images, model, batch_size, + dims, cuda, keep_size=keep_size) + + return m, s + + else: + raise ValueError + + +def calculate_fid_given_paths(paths, batch_size, cuda, dims): + """Calculates the FID of two paths""" + for p in paths: + if not os.path.exists(p): + raise RuntimeError('Invalid path: %s' % p) + + block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] + + model = InceptionV3([block_idx]) + if cuda: + model.cuda() + + m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size, + dims, cuda) + m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size, + dims, cuda) + fid_value = calculate_frechet_distance(m1, s1, m2, s2) + + return fid_value + + +def calculate_fid_given_images(images, batch_size, cuda, dims, use_globals=False, keep_size=False): + if use_globals: + global FID_MODEL # for multiprocessing + + for imgs in images: + if isinstance(imgs, list) and isinstance(imgs[0], (Image.Image, JpegImagePlugin.JpegImageFile)): + pass + else: + raise RuntimeError('Invalid images') + + block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] + + if 'FID_MODEL' not in globals() or not use_globals: + model = InceptionV3([block_idx]) + if cuda: + model.cuda() + + if use_globals: + FID_MODEL = model + + else: + model = FID_MODEL + + m1, s1 = _compute_statistics_of_images(images[0], model, batch_size, + dims, cuda, keep_size=False) + m2, s2 = _compute_statistics_of_images(images[1], model, batch_size, + dims, cuda, keep_size=False) + fid_value = calculate_frechet_distance(m1, s1, m2, s2) + return fid_value + + +if __name__ == '__main__': + args = parser.parse_args() + os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu + + fid_value = calculate_fid_given_paths(args.path, + args.batch_size, + args.gpu != '', + args.dims) + print('FID: ', fid_value) diff --git a/lama/saicinpainting/evaluation/losses/fid/inception.py b/lama/saicinpainting/evaluation/losses/fid/inception.py new file mode 100644 index 0000000000000000000000000000000000000000..e9bd0863b457aaa40c770eaa4acbb142b18fc18b --- /dev/null +++ b/lama/saicinpainting/evaluation/losses/fid/inception.py @@ -0,0 +1,323 @@ +import logging + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision import models + +try: + from torchvision.models.utils import load_state_dict_from_url +except ImportError: + from torch.utils.model_zoo import load_url as load_state_dict_from_url + +# Inception weights ported to Pytorch from +# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz +FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' + + +LOGGER = logging.getLogger(__name__) + + +class InceptionV3(nn.Module): + """Pretrained InceptionV3 network returning feature maps""" + + # Index of default block 
of inception to return, + # corresponds to output of final average pooling + DEFAULT_BLOCK_INDEX = 3 + + # Maps feature dimensionality to their output blocks indices + BLOCK_INDEX_BY_DIM = { + 64: 0, # First max pooling features + 192: 1, # Second max pooling featurs + 768: 2, # Pre-aux classifier features + 2048: 3 # Final average pooling features + } + + def __init__(self, + output_blocks=[DEFAULT_BLOCK_INDEX], + resize_input=True, + normalize_input=True, + requires_grad=False, + use_fid_inception=True): + """Build pretrained InceptionV3 + + Parameters + ---------- + output_blocks : list of int + Indices of blocks to return features of. Possible values are: + - 0: corresponds to output of first max pooling + - 1: corresponds to output of second max pooling + - 2: corresponds to output which is fed to aux classifier + - 3: corresponds to output of final average pooling + resize_input : bool + If true, bilinearly resizes input to width and height 299 before + feeding input to model. As the network without fully connected + layers is fully convolutional, it should be able to handle inputs + of arbitrary size, so resizing might not be strictly needed + normalize_input : bool + If true, scales the input from range (0, 1) to the range the + pretrained Inception network expects, namely (-1, 1) + requires_grad : bool + If true, parameters of the model require gradients. Possibly useful + for finetuning the network + use_fid_inception : bool + If true, uses the pretrained Inception model used in Tensorflow's + FID implementation. If false, uses the pretrained Inception model + available in torchvision. The FID Inception model has different + weights and a slightly different structure from torchvision's + Inception model. If you want to compute FID scores, you are + strongly advised to set this parameter to true to get comparable + results. + """ + super(InceptionV3, self).__init__() + + self.resize_input = resize_input + self.normalize_input = normalize_input + self.output_blocks = sorted(output_blocks) + self.last_needed_block = max(output_blocks) + + assert self.last_needed_block <= 3, \ + 'Last possible output block index is 3' + + self.blocks = nn.ModuleList() + + if use_fid_inception: + inception = fid_inception_v3() + else: + inception = models.inception_v3(pretrained=True) + + # Block 0: input to maxpool1 + block0 = [ + inception.Conv2d_1a_3x3, + inception.Conv2d_2a_3x3, + inception.Conv2d_2b_3x3, + nn.MaxPool2d(kernel_size=3, stride=2) + ] + self.blocks.append(nn.Sequential(*block0)) + + # Block 1: maxpool1 to maxpool2 + if self.last_needed_block >= 1: + block1 = [ + inception.Conv2d_3b_1x1, + inception.Conv2d_4a_3x3, + nn.MaxPool2d(kernel_size=3, stride=2) + ] + self.blocks.append(nn.Sequential(*block1)) + + # Block 2: maxpool2 to aux classifier + if self.last_needed_block >= 2: + block2 = [ + inception.Mixed_5b, + inception.Mixed_5c, + inception.Mixed_5d, + inception.Mixed_6a, + inception.Mixed_6b, + inception.Mixed_6c, + inception.Mixed_6d, + inception.Mixed_6e, + ] + self.blocks.append(nn.Sequential(*block2)) + + # Block 3: aux classifier to final avgpool + if self.last_needed_block >= 3: + block3 = [ + inception.Mixed_7a, + inception.Mixed_7b, + inception.Mixed_7c, + nn.AdaptiveAvgPool2d(output_size=(1, 1)) + ] + self.blocks.append(nn.Sequential(*block3)) + + for param in self.parameters(): + param.requires_grad = requires_grad + + def forward(self, inp): + """Get Inception feature maps + + Parameters + ---------- + inp : torch.autograd.Variable + Input tensor of shape Bx3xHxW. 
Values are expected to be in + range (0, 1) + + Returns + ------- + List of torch.autograd.Variable, corresponding to the selected output + block, sorted ascending by index + """ + outp = [] + x = inp + + if self.resize_input: + x = F.interpolate(x, + size=(299, 299), + mode='bilinear', + align_corners=False) + + if self.normalize_input: + x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1) + + for idx, block in enumerate(self.blocks): + x = block(x) + if idx in self.output_blocks: + outp.append(x) + + if idx == self.last_needed_block: + break + + return outp + + +def fid_inception_v3(): + """Build pretrained Inception model for FID computation + + The Inception model for FID computation uses a different set of weights + and has a slightly different structure than torchvision's Inception. + + This method first constructs torchvision's Inception and then patches the + necessary parts that are different in the FID Inception model. + """ + LOGGER.info('fid_inception_v3 called') + inception = models.inception_v3(num_classes=1008, + aux_logits=False, + pretrained=False) + LOGGER.info('models.inception_v3 done') + inception.Mixed_5b = FIDInceptionA(192, pool_features=32) + inception.Mixed_5c = FIDInceptionA(256, pool_features=64) + inception.Mixed_5d = FIDInceptionA(288, pool_features=64) + inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128) + inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160) + inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160) + inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192) + inception.Mixed_7b = FIDInceptionE_1(1280) + inception.Mixed_7c = FIDInceptionE_2(2048) + + LOGGER.info('fid_inception_v3 patching done') + + state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True) + LOGGER.info('fid_inception_v3 weights downloaded') + + inception.load_state_dict(state_dict) + LOGGER.info('fid_inception_v3 weights loaded into model') + + return inception + + +class FIDInceptionA(models.inception.InceptionA): + """InceptionA block patched for FID computation""" + def __init__(self, in_channels, pool_features): + super(FIDInceptionA, self).__init__(in_channels, pool_features) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + # Patch: Tensorflow's average pool does not use the padded zero's in + # its average calculation + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, + count_include_pad=False) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) + + +class FIDInceptionC(models.inception.InceptionC): + """InceptionC block patched for FID computation""" + def __init__(self, in_channels, channels_7x7): + super(FIDInceptionC, self).__init__(in_channels, channels_7x7) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + # Patch: Tensorflow's average pool does not use the padded zero's in + # its average calculation + branch_pool = 
F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, + count_include_pad=False) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return torch.cat(outputs, 1) + + +class FIDInceptionE_1(models.inception.InceptionE): + """First InceptionE block patched for FID computation""" + def __init__(self, in_channels): + super(FIDInceptionE_1, self).__init__(in_channels) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + # Patch: Tensorflow's average pool does not use the padded zero's in + # its average calculation + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, + count_include_pad=False) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) + + +class FIDInceptionE_2(models.inception.InceptionE): + """Second InceptionE block patched for FID computation""" + def __init__(self, in_channels): + super(FIDInceptionE_2, self).__init__(in_channels) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + # Patch: The FID Inception model uses max pooling instead of average + # pooling. This is likely an error in this specific Inception + # implementation, as other Inception models use average pooling here + # (which matches the description in the paper). 
+ branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) diff --git a/lama/saicinpainting/evaluation/losses/lpips.py b/lama/saicinpainting/evaluation/losses/lpips.py new file mode 100644 index 0000000000000000000000000000000000000000..fd0161001018d32bf746d5cb990feb6bf73a51f6 --- /dev/null +++ b/lama/saicinpainting/evaluation/losses/lpips.py @@ -0,0 +1,891 @@ +############################################################ +# The contents below have been combined using files in the # +# following repository: # +# https://github.com/richzhang/PerceptualSimilarity # +############################################################ + +############################################################ +# __init__.py # +############################################################ + +import numpy as np +from skimage.metrics import structural_similarity +import torch + +from ....saicinpainting.utils import get_shape + + +class PerceptualLoss(torch.nn.Module): + def __init__(self, model='net-lin', net='alex', colorspace='rgb', model_path=None, spatial=False, use_gpu=True): + # VGG using our perceptually-learned weights (LPIPS metric) + # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss + super(PerceptualLoss, self).__init__() + self.use_gpu = use_gpu + self.spatial = spatial + self.model = DistModel() + self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, + model_path=model_path, spatial=self.spatial) + + def forward(self, pred, target, normalize=True): + """ + Pred and target are Variables. + If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1] + If normalize is False, assumes the images are already between [-1,+1] + Inputs pred and target are Nx3xHxW + Output pytorch Variable N long + """ + + if normalize: + target = 2 * target - 1 + pred = 2 * pred - 1 + + return self.model(target, pred) + + +def normalize_tensor(in_feat, eps=1e-10): + norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1, keepdim=True)) + return in_feat / (norm_factor + eps) + + +def l2(p0, p1, range=255.): + return .5 * np.mean((p0 / range - p1 / range) ** 2) + + +def psnr(p0, p1, peak=255.): + return 10 * np.log10(peak ** 2 / np.mean((1. * p0 - 1. * p1) ** 2)) + + +def dssim(p0, p1, range=255.): + return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2. + + +def rgb2lab(in_img, mean_cent=False): + from skimage import color + img_lab = color.rgb2lab(in_img) + if (mean_cent): + img_lab[:, :, 0] = img_lab[:, :, 0] - 50 + return img_lab + + +def tensor2np(tensor_obj): + # change dimension of a tensor object into a numpy array + return tensor_obj[0].cpu().float().numpy().transpose((1, 2, 0)) + + +def np2tensor(np_obj): + # change dimenion of np array into tensor array + return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1))) + + +def tensor2tensorlab(image_tensor, to_norm=True, mc_only=False): + # image tensor to lab tensor + from skimage import color + + img = tensor2im(image_tensor) + img_lab = color.rgb2lab(img) + if (mc_only): + img_lab[:, :, 0] = img_lab[:, :, 0] - 50 + if (to_norm and not mc_only): + img_lab[:, :, 0] = img_lab[:, :, 0] - 50 + img_lab = img_lab / 100. 
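+        # L is shifted from [0, 100] to [-50, 50] and all channels are divided by 100,
+        # so the normalized Lab tensor lands roughly in [-1, 1] (a/b may slightly exceed it)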
+ + return np2tensor(img_lab) + + +def tensorlab2tensor(lab_tensor, return_inbnd=False): + from skimage import color + import warnings + warnings.filterwarnings("ignore") + + lab = tensor2np(lab_tensor) * 100. + lab[:, :, 0] = lab[:, :, 0] + 50 + + rgb_back = 255. * np.clip(color.lab2rgb(lab.astype('float')), 0, 1) + if (return_inbnd): + # convert back to lab, see if we match + lab_back = color.rgb2lab(rgb_back.astype('uint8')) + mask = 1. * np.isclose(lab_back, lab, atol=2.) + mask = np2tensor(np.prod(mask, axis=2)[:, :, np.newaxis]) + return (im2tensor(rgb_back), mask) + else: + return im2tensor(rgb_back) + + +def rgb2lab(input): + from skimage import color + return color.rgb2lab(input / 255.) + + +def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.): + image_numpy = image_tensor[0].cpu().float().numpy() + image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor + return image_numpy.astype(imtype) + + +def im2tensor(image, imtype=np.uint8, cent=1., factor=255. / 2.): + return torch.Tensor((image / factor - cent) + [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) + + +def tensor2vec(vector_tensor): + return vector_tensor.data.cpu().numpy()[:, :, 0, 0] + + +def voc_ap(rec, prec, use_07_metric=False): + """ ap = voc_ap(rec, prec, [use_07_metric]) + Compute VOC AP given precision and recall. + If use_07_metric is true, uses the + VOC 07 11 point method (default:False). + """ + if use_07_metric: + # 11 point metric + ap = 0. + for t in np.arange(0., 1.1, 0.1): + if np.sum(rec >= t) == 0: + p = 0 + else: + p = np.max(prec[rec >= t]) + ap = ap + p / 11. + else: + # correct AP calculation + # first append sentinel values at the end + mrec = np.concatenate(([0.], rec, [1.])) + mpre = np.concatenate(([0.], prec, [0.])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.): + # def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.): + image_numpy = image_tensor[0].cpu().float().numpy() + image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor + return image_numpy.astype(imtype) + + +def im2tensor(image, imtype=np.uint8, cent=1., factor=255. 
/ 2.): + # def im2tensor(image, imtype=np.uint8, cent=1., factor=1.): + return torch.Tensor((image / factor - cent) + [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) + + +############################################################ +# base_model.py # +############################################################ + + +class BaseModel(torch.nn.Module): + def __init__(self): + super().__init__() + + def name(self): + return 'BaseModel' + + def initialize(self, use_gpu=True): + self.use_gpu = use_gpu + + def forward(self): + pass + + def get_image_paths(self): + pass + + def optimize_parameters(self): + pass + + def get_current_visuals(self): + return self.input + + def get_current_errors(self): + return {} + + def save(self, label): + pass + + # helper saving function that can be used by subclasses + def save_network(self, network, path, network_label, epoch_label): + save_filename = '%s_net_%s.pth' % (epoch_label, network_label) + save_path = os.path.join(path, save_filename) + torch.save(network.state_dict(), save_path) + + # helper loading function that can be used by subclasses + def load_network(self, network, network_label, epoch_label): + save_filename = '%s_net_%s.pth' % (epoch_label, network_label) + save_path = os.path.join(self.save_dir, save_filename) + print('Loading network from %s' % save_path) + network.load_state_dict(torch.load(save_path, map_location='cpu')) + + def update_learning_rate(): + pass + + def get_image_paths(self): + return self.image_paths + + def save_done(self, flag=False): + np.save(os.path.join(self.save_dir, 'done_flag'), flag) + np.savetxt(os.path.join(self.save_dir, 'done_flag'), [flag, ], fmt='%i') + + +############################################################ +# dist_model.py # +############################################################ + +import os +from collections import OrderedDict +from scipy.ndimage import zoom +from tqdm import tqdm + + +class DistModel(BaseModel): + def name(self): + return self.model_name + + def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, + model_path=None, + use_gpu=True, printNet=False, spatial=False, + is_train=False, lr=.0001, beta1=0.5, version='0.1'): + ''' + INPUTS + model - ['net-lin'] for linearly calibrated network + ['net'] for off-the-shelf network + ['L2'] for L2 distance in Lab colorspace + ['SSIM'] for ssim in RGB colorspace + net - ['squeeze','alex','vgg'] + model_path - if None, will look in weights/[NET_NAME].pth + colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM + use_gpu - bool - whether or not to use a GPU + printNet - bool - whether or not to print network architecture out + spatial - bool - whether to output an array containing varying distances across spatial dimensions + spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below). + spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images. + spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear). 
+ is_train - bool - [True] for training mode + lr - float - initial learning rate + beta1 - float - initial momentum term for adam + version - 0.1 for latest, 0.0 was original (with a bug) + ''' + BaseModel.initialize(self, use_gpu=use_gpu) + + self.model = model + self.net = net + self.is_train = is_train + self.spatial = spatial + self.model_name = '%s [%s]' % (model, net) + + if (self.model == 'net-lin'): # pretrained net + linear layer + self.net = PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net, + use_dropout=True, spatial=spatial, version=version, lpips=True) + kw = dict(map_location='cpu') + if (model_path is None): + import inspect + model_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), '..', '..', '..', 'models', 'lpips_models', f'{net}.pth')) + + if (not is_train): + self.net.load_state_dict(torch.load(model_path, **kw), strict=False) + + elif (self.model == 'net'): # pretrained network + self.net = PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False) + elif (self.model in ['L2', 'l2']): + self.net = L2(use_gpu=use_gpu, colorspace=colorspace) # not really a network, only for testing + self.model_name = 'L2' + elif (self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']): + self.net = DSSIM(use_gpu=use_gpu, colorspace=colorspace) + self.model_name = 'SSIM' + else: + raise ValueError("Model [%s] not recognized." % self.model) + + self.trainable_parameters = list(self.net.parameters()) + + if self.is_train: # training mode + # extra network on top to go from distances (d0,d1) => predicted human judgment (h*) + self.rankLoss = BCERankingLoss() + self.trainable_parameters += list(self.rankLoss.net.parameters()) + self.lr = lr + self.old_lr = lr + self.optimizer_net = torch.optim.Adam(self.trainable_parameters, lr=lr, betas=(beta1, 0.999)) + else: # test mode + self.net.eval() + + # if (use_gpu): + # self.net.to(gpu_ids[0]) + # self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids) + # if (self.is_train): + # self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0 + + if (printNet): + print('---------- Networks initialized -------------') + print_network(self.net) + print('-----------------------------------------------') + + def forward(self, in0, in1, retPerLayer=False): + ''' Function computes the distance between image patches in0 and in1 + INPUTS + in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1] + OUTPUT + computed distances between in0 and in1 + ''' + + return self.net(in0, in1, retPerLayer=retPerLayer) + + # ***** TRAINING FUNCTIONS ***** + def optimize_parameters(self): + self.forward_train() + self.optimizer_net.zero_grad() + self.backward_train() + self.optimizer_net.step() + self.clamp_weights() + + def clamp_weights(self): + for module in self.net.modules(): + if (hasattr(module, 'weight') and module.kernel_size == (1, 1)): + module.weight.data = torch.clamp(module.weight.data, min=0) + + def set_input(self, data): + self.input_ref = data['ref'] + self.input_p0 = data['p0'] + self.input_p1 = data['p1'] + self.input_judge = data['judge'] + + # if (self.use_gpu): + # self.input_ref = self.input_ref.to(device=self.gpu_ids[0]) + # self.input_p0 = self.input_p0.to(device=self.gpu_ids[0]) + # self.input_p1 = self.input_p1.to(device=self.gpu_ids[0]) + # self.input_judge = self.input_judge.to(device=self.gpu_ids[0]) + + # self.var_ref = Variable(self.input_ref, requires_grad=True) + # self.var_p0 = Variable(self.input_p0, requires_grad=True) + # self.var_p1 = Variable(self.input_p1, 
requires_grad=True) + + def forward_train(self): # run forward pass + # print(self.net.module.scaling_layer.shift) + # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item()) + + assert False, "We shoud've not get here when using LPIPS as a metric" + + self.d0 = self(self.var_ref, self.var_p0) + self.d1 = self(self.var_ref, self.var_p1) + self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge) + + self.var_judge = Variable(1. * self.input_judge).view(self.d0.size()) + + self.loss_total = self.rankLoss(self.d0, self.d1, self.var_judge * 2. - 1.) + + return self.loss_total + + def backward_train(self): + torch.mean(self.loss_total).backward() + + def compute_accuracy(self, d0, d1, judge): + ''' d0, d1 are Variables, judge is a Tensor ''' + d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten() + judge_per = judge.cpu().numpy().flatten() + return d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per) + + def get_current_errors(self): + retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()), + ('acc_r', self.acc_r)]) + + for key in retDict.keys(): + retDict[key] = np.mean(retDict[key]) + + return retDict + + def get_current_visuals(self): + zoom_factor = 256 / self.var_ref.data.size()[2] + + ref_img = tensor2im(self.var_ref.data) + p0_img = tensor2im(self.var_p0.data) + p1_img = tensor2im(self.var_p1.data) + + ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0) + p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0) + p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0) + + return OrderedDict([('ref', ref_img_vis), + ('p0', p0_img_vis), + ('p1', p1_img_vis)]) + + def save(self, path, label): + if (self.use_gpu): + self.save_network(self.net.module, path, '', label) + else: + self.save_network(self.net, path, '', label) + self.save_network(self.rankLoss.net, path, 'rank', label) + + def update_learning_rate(self, nepoch_decay): + lrd = self.lr / nepoch_decay + lr = self.old_lr - lrd + + for param_group in self.optimizer_net.param_groups: + param_group['lr'] = lr + + print('update lr [%s] decay: %f -> %f' % (type, self.old_lr, lr)) + self.old_lr = lr + + +def score_2afc_dataset(data_loader, func, name=''): + ''' Function computes Two Alternative Forced Choice (2AFC) score using + distance function 'func' in dataset 'data_loader' + INPUTS + data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside + func - callable distance function - calling d=func(in0,in1) should take 2 + pytorch tensors with shape Nx3xXxY, and return numpy array of length N + OUTPUTS + [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators + [1] - dictionary with following elements + d0s,d1s - N arrays containing distances between reference patch to perturbed patches + gts - N array in [0,1], preferred patch selected by human evaluators + (closer to "0" for left patch p0, "1" for right patch p1, + "0.6" means 60pct people preferred right patch, 40pct preferred left) + scores - N array in [0,1], corresponding to what percentage function agreed with humans + CONSTS + N - number of test triplets in data_loader + ''' + + d0s = [] + d1s = [] + gts = [] + + for data in tqdm(data_loader.load_data(), desc=name): + d0s += func(data['ref'], data['p0']).data.cpu().numpy().flatten().tolist() + d1s += func(data['ref'], data['p1']).data.cpu().numpy().flatten().tolist() + gts += data['judge'].cpu().numpy().flatten().tolist() + + d0s = np.array(d0s) + d1s = 
np.array(d1s) + gts = np.array(gts) + scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5 + + return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores)) + + +def score_jnd_dataset(data_loader, func, name=''): + ''' Function computes JND score using distance function 'func' in dataset 'data_loader' + INPUTS + data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside + func - callable distance function - calling d=func(in0,in1) should take 2 + pytorch tensors with shape Nx3xXxY, and return pytorch array of length N + OUTPUTS + [0] - JND score in [0,1], mAP score (area under precision-recall curve) + [1] - dictionary with following elements + ds - N array containing distances between two patches shown to human evaluator + sames - N array containing fraction of people who thought the two patches were identical + CONSTS + N - number of test triplets in data_loader + ''' + + ds = [] + gts = [] + + for data in tqdm(data_loader.load_data(), desc=name): + ds += func(data['p0'], data['p1']).data.cpu().numpy().tolist() + gts += data['same'].cpu().numpy().flatten().tolist() + + sames = np.array(gts) + ds = np.array(ds) + + sorted_inds = np.argsort(ds) + ds_sorted = ds[sorted_inds] + sames_sorted = sames[sorted_inds] + + TPs = np.cumsum(sames_sorted) + FPs = np.cumsum(1 - sames_sorted) + FNs = np.sum(sames_sorted) - TPs + + precs = TPs / (TPs + FPs) + recs = TPs / (TPs + FNs) + score = voc_ap(recs, precs) + + return (score, dict(ds=ds, sames=sames)) + + +############################################################ +# networks_basic.py # +############################################################ + +import torch.nn as nn +from torch.autograd import Variable +import numpy as np + + +def spatial_average(in_tens, keepdim=True): + return in_tens.mean([2, 3], keepdim=keepdim) + + +def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W + in_H = in_tens.shape[2] + scale_factor = 1. 
* out_H / in_H + + return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) + + +# Learned perceptual metric +class PNetLin(nn.Module): + def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, + version='0.1', lpips=True): + super(PNetLin, self).__init__() + + self.pnet_type = pnet_type + self.pnet_tune = pnet_tune + self.pnet_rand = pnet_rand + self.spatial = spatial + self.lpips = lpips + self.version = version + self.scaling_layer = ScalingLayer() + + if (self.pnet_type in ['vgg', 'vgg16']): + net_type = vgg16 + self.chns = [64, 128, 256, 512, 512] + elif (self.pnet_type == 'alex'): + net_type = alexnet + self.chns = [64, 192, 384, 256, 256] + elif (self.pnet_type == 'squeeze'): + net_type = squeezenet + self.chns = [64, 128, 256, 384, 384, 512, 512] + self.L = len(self.chns) + + self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune) + + if (lpips): + self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) + self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) + self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) + self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) + self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) + self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4] + if (self.pnet_type == 'squeeze'): # 7 layers for squeezenet + self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout) + self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout) + self.lins += [self.lin5, self.lin6] + + def forward(self, in0, in1, retPerLayer=False): + # v0.0 - original release had a bug, where input was not scaled + in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version == '0.1' else ( + in0, in1) + outs0, outs1 = self.net(in0_input), self.net(in1_input) + feats0, feats1, diffs = {}, {}, {} + + for kk in range(self.L): + feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk]) + diffs[kk] = (feats0[kk] - feats1[kk]) ** 2 + + if (self.lpips): + if (self.spatial): + res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)] + else: + res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)] + else: + if (self.spatial): + res = [upsample(diffs[kk].sum(dim=1, keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)] + else: + res = [spatial_average(diffs[kk].sum(dim=1, keepdim=True), keepdim=True) for kk in range(self.L)] + + val = res[0] + for l in range(1, self.L): + val += res[l] + + if (retPerLayer): + return (val, res) + else: + return val + + +class ScalingLayer(nn.Module): + def __init__(self): + super(ScalingLayer, self).__init__() + self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) + self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None]) + + def forward(self, inp): + return (inp - self.shift) / self.scale + + +class NetLinLayer(nn.Module): + ''' A single linear layer which does a 1x1 conv ''' + + def __init__(self, chn_in, chn_out=1, use_dropout=False): + super(NetLinLayer, self).__init__() + + layers = [nn.Dropout(), ] if (use_dropout) else [] + layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ] + self.model = nn.Sequential(*layers) + + +class Dist2LogitLayer(nn.Module): + ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) ''' + + def 
__init__(self, chn_mid=32, use_sigmoid=True): + super(Dist2LogitLayer, self).__init__() + + layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True), ] + layers += [nn.LeakyReLU(0.2, True), ] + layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True), ] + layers += [nn.LeakyReLU(0.2, True), ] + layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True), ] + if (use_sigmoid): + layers += [nn.Sigmoid(), ] + self.model = nn.Sequential(*layers) + + def forward(self, d0, d1, eps=0.1): + return self.model(torch.cat((d0, d1, d0 - d1, d0 / (d1 + eps), d1 / (d0 + eps)), dim=1)) + + +class BCERankingLoss(nn.Module): + def __init__(self, chn_mid=32): + super(BCERankingLoss, self).__init__() + self.net = Dist2LogitLayer(chn_mid=chn_mid) + # self.parameters = list(self.net.parameters()) + self.loss = torch.nn.BCELoss() + + def forward(self, d0, d1, judge): + per = (judge + 1.) / 2. + self.logit = self.net(d0, d1) + return self.loss(self.logit, per) + + +# L2, DSSIM metrics +class FakeNet(nn.Module): + def __init__(self, use_gpu=True, colorspace='Lab'): + super(FakeNet, self).__init__() + self.use_gpu = use_gpu + self.colorspace = colorspace + + +class L2(FakeNet): + + def forward(self, in0, in1, retPerLayer=None): + assert (in0.size()[0] == 1) # currently only supports batchSize 1 + + if (self.colorspace == 'RGB'): + (N, C, X, Y) = in0.size() + value = torch.mean(torch.mean(torch.mean((in0 - in1) ** 2, dim=1).view(N, 1, X, Y), dim=2).view(N, 1, 1, Y), + dim=3).view(N) + return value + elif (self.colorspace == 'Lab'): + value = l2(tensor2np(tensor2tensorlab(in0.data, to_norm=False)), + tensor2np(tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float') + ret_var = Variable(torch.Tensor((value,))) + # if (self.use_gpu): + # ret_var = ret_var.cuda() + return ret_var + + +class DSSIM(FakeNet): + + def forward(self, in0, in1, retPerLayer=None): + assert (in0.size()[0] == 1) # currently only supports batchSize 1 + + if (self.colorspace == 'RGB'): + value = dssim(1. * tensor2im(in0.data), 1. 
* tensor2im(in1.data), range=255.).astype('float') + elif (self.colorspace == 'Lab'): + value = dssim(tensor2np(tensor2tensorlab(in0.data, to_norm=False)), + tensor2np(tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float') + ret_var = Variable(torch.Tensor((value,))) + # if (self.use_gpu): + # ret_var = ret_var.cuda() + return ret_var + + +def print_network(net): + num_params = 0 + for param in net.parameters(): + num_params += param.numel() + print('Network', net) + print('Total number of parameters: %d' % num_params) + + +############################################################ +# pretrained_networks.py # +############################################################ + +from collections import namedtuple +import torch +from torchvision import models as tv + + +class squeezenet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(squeezenet, self).__init__() + pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.slice6 = torch.nn.Sequential() + self.slice7 = torch.nn.Sequential() + self.N_slices = 7 + for x in range(2): + self.slice1.add_module(str(x), pretrained_features[x]) + for x in range(2, 5): + self.slice2.add_module(str(x), pretrained_features[x]) + for x in range(5, 8): + self.slice3.add_module(str(x), pretrained_features[x]) + for x in range(8, 10): + self.slice4.add_module(str(x), pretrained_features[x]) + for x in range(10, 11): + self.slice5.add_module(str(x), pretrained_features[x]) + for x in range(11, 12): + self.slice6.add_module(str(x), pretrained_features[x]) + for x in range(12, 13): + self.slice7.add_module(str(x), pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1 = h + h = self.slice2(h) + h_relu2 = h + h = self.slice3(h) + h_relu3 = h + h = self.slice4(h) + h_relu4 = h + h = self.slice5(h) + h_relu5 = h + h = self.slice6(h) + h_relu6 = h + h = self.slice7(h) + h_relu7 = h + vgg_outputs = namedtuple("SqueezeOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5', 'relu6', 'relu7']) + out = vgg_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7) + + return out + + +class alexnet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(alexnet, self).__init__() + alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.N_slices = 5 + for x in range(2): + self.slice1.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(2, 5): + self.slice2.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(5, 8): + self.slice3.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(8, 10): + self.slice4.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(10, 12): + self.slice5.add_module(str(x), alexnet_pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1 = h + h = self.slice2(h) + h_relu2 = h + h = self.slice3(h) + h_relu3 = h + h = self.slice4(h) + h_relu4 = h + h = 
self.slice5(h) + h_relu5 = h + alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5']) + out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5) + + return out + + +class vgg16(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(vgg16, self).__init__() + vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.N_slices = 5 + for x in range(4): + self.slice1.add_module(str(x), vgg_pretrained_features[x]) + for x in range(4, 9): + self.slice2.add_module(str(x), vgg_pretrained_features[x]) + for x in range(9, 16): + self.slice3.add_module(str(x), vgg_pretrained_features[x]) + for x in range(16, 23): + self.slice4.add_module(str(x), vgg_pretrained_features[x]) + for x in range(23, 30): + self.slice5.add_module(str(x), vgg_pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1_2 = h + h = self.slice2(h) + h_relu2_2 = h + h = self.slice3(h) + h_relu3_3 = h + h = self.slice4(h) + h_relu4_3 = h + h = self.slice5(h) + h_relu5_3 = h + vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) + out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) + + return out + + +class resnet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True, num=18): + super(resnet, self).__init__() + if (num == 18): + self.net = tv.resnet18(pretrained=pretrained) + elif (num == 34): + self.net = tv.resnet34(pretrained=pretrained) + elif (num == 50): + self.net = tv.resnet50(pretrained=pretrained) + elif (num == 101): + self.net = tv.resnet101(pretrained=pretrained) + elif (num == 152): + self.net = tv.resnet152(pretrained=pretrained) + self.N_slices = 5 + + self.conv1 = self.net.conv1 + self.bn1 = self.net.bn1 + self.relu = self.net.relu + self.maxpool = self.net.maxpool + self.layer1 = self.net.layer1 + self.layer2 = self.net.layer2 + self.layer3 = self.net.layer3 + self.layer4 = self.net.layer4 + + def forward(self, X): + h = self.conv1(X) + h = self.bn1(h) + h = self.relu(h) + h_relu1 = h + h = self.maxpool(h) + h = self.layer1(h) + h_conv2 = h + h = self.layer2(h) + h_conv3 = h + h = self.layer3(h) + h_conv4 = h + h = self.layer4(h) + h_conv5 = h + + outputs = namedtuple("Outputs", ['relu1', 'conv2', 'conv3', 'conv4', 'conv5']) + out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5) + + return out diff --git a/lama/saicinpainting/evaluation/losses/ssim.py b/lama/saicinpainting/evaluation/losses/ssim.py new file mode 100644 index 0000000000000000000000000000000000000000..ee43a0095408eca98e253dea194db788446f9c0a --- /dev/null +++ b/lama/saicinpainting/evaluation/losses/ssim.py @@ -0,0 +1,74 @@ +import numpy as np +import torch +import torch.nn.functional as F + + +class SSIM(torch.nn.Module): + """SSIM. 
Modified from: + https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py + """ + + def __init__(self, window_size=11, size_average=True): + super().__init__() + self.window_size = window_size + self.size_average = size_average + self.channel = 1 + self.register_buffer('window', self._create_window(window_size, self.channel)) + + def forward(self, img1, img2): + assert len(img1.shape) == 4 + + channel = img1.size()[1] + + if channel == self.channel and self.window.data.type() == img1.data.type(): + window = self.window + else: + window = self._create_window(self.window_size, channel) + + # window = window.to(img1.get_device()) + window = window.type_as(img1) + + self.window = window + self.channel = channel + + return self._ssim(img1, img2, window, self.window_size, channel, self.size_average) + + def _gaussian(self, window_size, sigma): + gauss = torch.Tensor([ + np.exp(-(x - (window_size // 2)) ** 2 / float(2 * sigma ** 2)) for x in range(window_size) + ]) + return gauss / gauss.sum() + + def _create_window(self, window_size, channel): + _1D_window = self._gaussian(window_size, 1.5).unsqueeze(1) + _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) + return _2D_window.expand(channel, 1, window_size, window_size).contiguous() + + def _ssim(self, img1, img2, window, window_size, channel, size_average=True): + mu1 = F.conv2d(img1, window, padding=(window_size // 2), groups=channel) + mu2 = F.conv2d(img2, window, padding=(window_size // 2), groups=channel) + + mu1_sq = mu1.pow(2) + mu2_sq = mu2.pow(2) + mu1_mu2 = mu1 * mu2 + + sigma1_sq = F.conv2d( + img1 * img1, window, padding=(window_size // 2), groups=channel) - mu1_sq + sigma2_sq = F.conv2d( + img2 * img2, window, padding=(window_size // 2), groups=channel) - mu2_sq + sigma12 = F.conv2d( + img1 * img2, window, padding=(window_size // 2), groups=channel) - mu1_mu2 + + C1 = 0.01 ** 2 + C2 = 0.03 ** 2 + + ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / \ + ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) + + if size_average: + return ssim_map.mean() + + return ssim_map.mean(1).mean(1).mean(1) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): + return diff --git a/lama/saicinpainting/evaluation/masks/README.md b/lama/saicinpainting/evaluation/masks/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cf176bc10fae3b03f139727147c220f2a735c806 --- /dev/null +++ b/lama/saicinpainting/evaluation/masks/README.md @@ -0,0 +1,27 @@ +# Current algorithm + +## Choice of mask objects + +For identification of the objects which are suitable for mask obtaining, panoptic segmentation model +from [detectron2](https://github.com/facebookresearch/detectron2) trained on COCO. Categories of the detected instances +belong either to "stuff" or "things" types. We consider that instances of objects should have category belong +to "things". Besides, we set upper bound on area which is taken by the object — we consider that too big +area indicates either of the instance being a background or a main object which should not be removed. + +## Choice of position for mask + +We consider that input image has size 2^n x 2^m. We downsample it using +[COUNTLESS](https://github.com/william-silversmith/countless) algorithm so the width is equal to +64 = 2^8 = 2^{downsample_levels}. + +### Augmentation + +There are several parameters for augmentation: +- Scaling factor. 
We limit scaling to the case when a mask after scaling with pivot point in its center fits inside the + image completely. +- + +### Shift + + +## Select diff --git a/lama/saicinpainting/evaluation/masks/__init__.py b/lama/saicinpainting/evaluation/masks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lama/saicinpainting/evaluation/masks/countless/.gitignore b/lama/saicinpainting/evaluation/masks/countless/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..872aa273a4e3ec99d362cefa1c67550f21f3c366 --- /dev/null +++ b/lama/saicinpainting/evaluation/masks/countless/.gitignore @@ -0,0 +1 @@ +results \ No newline at end of file diff --git a/lama/saicinpainting/evaluation/masks/countless/README.md b/lama/saicinpainting/evaluation/masks/countless/README.md new file mode 100644 index 0000000000000000000000000000000000000000..67335464d794776140fd0308f408608f2231309b --- /dev/null +++ b/lama/saicinpainting/evaluation/masks/countless/README.md @@ -0,0 +1,25 @@ +[![Build Status](https://travis-ci.org/william-silversmith/countless.svg?branch=master)](https://travis-ci.org/william-silversmith/countless) + +Python COUNTLESS Downsampling +============================= + +To install: + +`pip install -r requirements.txt` + +To test: + +`python test.py` + +To benchmark countless2d: + +`python python/countless2d.py python/images/gray_segmentation.png` + +To benchmark countless3d: + +`python python/countless3d.py` + +Adjust N and the list of algorithms inside each script to modify the run parameters. + + +Python3 is slightly faster than Python2. \ No newline at end of file diff --git a/lama/saicinpainting/evaluation/masks/countless/__init__.py b/lama/saicinpainting/evaluation/masks/countless/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lama/saicinpainting/evaluation/masks/countless/countless2d.py b/lama/saicinpainting/evaluation/masks/countless/countless2d.py new file mode 100644 index 0000000000000000000000000000000000000000..dc27b73affa20ab1a8a199542469a10aaf1f555a --- /dev/null +++ b/lama/saicinpainting/evaluation/masks/countless/countless2d.py @@ -0,0 +1,529 @@ +from __future__ import print_function, division + +""" +COUNTLESS performance test in Python. + +python countless2d.py ./images/NAMEOFIMAGE +""" + +import six +from six.moves import range +from collections import defaultdict +from functools import reduce +import operator +import io +import os +from PIL import Image +import math +import numpy as np +import random +import sys +import time +from tqdm import tqdm +from scipy import ndimage + +def simplest_countless(data): + """ + Vectorized implementation of downsampling a 2D + image by 2 on each side using the COUNTLESS algorithm. + + data is a 2D numpy array with even dimensions. + """ + sections = [] + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. 
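+ # For example, the 2x2 block [[5, 5], [2, 3]] splits into A=5, B=5, C=2, D=3;
+ # A == B, so the output pixel is 5. For [[1, 2], [3, 4]] no pair matches and
+ # the D position (4) is returned.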
+ factor = (2,2) + for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + a, b, c, d = sections + + ab = a * (a == b) # PICK(A,B) + ac = a * (a == c) # PICK(A,C) + bc = b * (b == c) # PICK(B,C) + + a = ab | ac | bc # Bitwise OR, safe b/c non-matches are zeroed + + return a + (a == 0) * d # AB || AC || BC || D + +def quick_countless(data): + """ + Vectorized implementation of downsampling a 2D + image by 2 on each side using the COUNTLESS algorithm. + + data is a 2D numpy array with even dimensions. + """ + sections = [] + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. + factor = (2,2) + for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + a, b, c, d = sections + + ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization + bc = b * (b == c) # PICK(B,C) + + a = ab_ac | bc # (PICK(A,B) || PICK(A,C)) or PICK(B,C) + return a + (a == 0) * d # AB || AC || BC || D + +def quickest_countless(data): + """ + Vectorized implementation of downsampling a 2D + image by 2 on each side using the COUNTLESS algorithm. + + data is a 2D numpy array with even dimensions. + """ + sections = [] + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. + factor = (2,2) + for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + a, b, c, d = sections + + ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization + ab_ac |= b * (b == c) # PICK(B,C) + return ab_ac + (ab_ac == 0) * d # AB || AC || BC || D + +def quick_countless_xor(data): + """ + Vectorized implementation of downsampling a 2D + image by 2 on each side using the COUNTLESS algorithm. + + data is a 2D numpy array with even dimensions. + """ + sections = [] + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. + factor = (2,2) + for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + a, b, c, d = sections + + ab = a ^ (a ^ b) # a or b + ab += (ab != a) * ((ab ^ (ab ^ c)) - b) # b or c + ab += (ab == c) * ((ab ^ (ab ^ d)) - c) # c or d + return ab + +def stippled_countless(data): + """ + Vectorized implementation of downsampling a 2D + image by 2 on each side using the COUNTLESS algorithm + that treats zero as "background" and inflates lone + pixels. + + data is a 2D numpy array with even dimensions. + """ + sections = [] + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. 
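+ # Unlike simplest_countless, the "nonzero" fallback below keeps a lone
+ # foreground pixel alive: the block [[0, 0], [3, 0]] downsamples to 3
+ # rather than to the background value 0.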
+ factor = (2,2) + for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + a, b, c, d = sections + + ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization + ab_ac |= b * (b == c) # PICK(B,C) + + nonzero = a + (a == 0) * (b + (b == 0) * c) + return ab_ac + (ab_ac == 0) * (d + (d == 0) * nonzero) # AB || AC || BC || D + +def zero_corrected_countless(data): + """ + Vectorized implementation of downsampling a 2D + image by 2 on each side using the COUNTLESS algorithm. + + data is a 2D numpy array with even dimensions. + """ + # allows us to prevent losing 1/2 a bit of information + # at the top end by using a bigger type. Without this 255 is handled incorrectly. + data, upgraded = upgrade_type(data) + + # offset from zero, raw countless doesn't handle 0 correctly + # we'll remove the extra 1 at the end. + data += 1 + + sections = [] + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. + factor = (2,2) + for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + a, b, c, d = sections + + ab = a * (a == b) # PICK(A,B) + ac = a * (a == c) # PICK(A,C) + bc = b * (b == c) # PICK(B,C) + + a = ab | ac | bc # Bitwise OR, safe b/c non-matches are zeroed + + result = a + (a == 0) * d - 1 # a or d - 1 + + if upgraded: + return downgrade_type(result) + + # only need to reset data if we weren't upgraded + # b/c no copy was made in that case + data -= 1 + + return result + +def countless_extreme(data): + nonzeros = np.count_nonzero(data) + # print("nonzeros", nonzeros) + + N = reduce(operator.mul, data.shape) + + if nonzeros == N: + print("quick") + return quick_countless(data) + elif np.count_nonzero(data + 1) == N: + print("quick") + # print("upper", nonzeros) + return quick_countless(data) + else: + return countless(data) + + +def countless(data): + """ + Vectorized implementation of downsampling a 2D + image by 2 on each side using the COUNTLESS algorithm. + + data is a 2D numpy array with even dimensions. + """ + # allows us to prevent losing 1/2 a bit of information + # at the top end by using a bigger type. Without this 255 is handled incorrectly. + data, upgraded = upgrade_type(data) + + # offset from zero, raw countless doesn't handle 0 correctly + # we'll remove the extra 1 at the end. + data += 1 + + sections = [] + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. 
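+ # Because every label was shifted up by one above, ab_ac is zero only when no
+ # pair matched, so the (ab_ac == 0) test below is a reliable "fall back to D"
+ # sentinel before the offset is removed again.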
+ factor = (2,2) + for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + a, b, c, d = sections + + ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization + ab_ac |= b * (b == c) # PICK(B,C) + result = ab_ac + (ab_ac == 0) * d - 1 # (matches or d) - 1 + + if upgraded: + return downgrade_type(result) + + # only need to reset data if we weren't upgraded + # b/c no copy was made in that case + data -= 1 + + return result + +def upgrade_type(arr): + dtype = arr.dtype + + if dtype == np.uint8: + return arr.astype(np.uint16), True + elif dtype == np.uint16: + return arr.astype(np.uint32), True + elif dtype == np.uint32: + return arr.astype(np.uint64), True + + return arr, False + +def downgrade_type(arr): + dtype = arr.dtype + + if dtype == np.uint64: + return arr.astype(np.uint32) + elif dtype == np.uint32: + return arr.astype(np.uint16) + elif dtype == np.uint16: + return arr.astype(np.uint8) + + return arr + +def odd_to_even(image): + """ + To facilitate 2x2 downsampling segmentation, change an odd sized image into an even sized one. + Works by mirroring the starting 1 pixel edge of the image on odd shaped sides. + + e.g. turn a 3x3x5 image into a 4x4x5 (the x and y are what are getting downsampled) + + For example: [ 3, 2, 4 ] => [ 3, 3, 2, 4 ] which is now easy to downsample. + + """ + shape = np.array(image.shape) + + offset = (shape % 2)[:2] # x,y offset + + # detect if we're dealing with an even + # image. if so it's fine, just return. + if not np.any(offset): + return image + + oddshape = image.shape[:2] + offset + oddshape = np.append(oddshape, shape[2:]) + oddshape = oddshape.astype(int) + + newimg = np.empty(shape=oddshape, dtype=image.dtype) + + ox,oy = offset + sx,sy = oddshape + + newimg[0,0] = image[0,0] # corner + newimg[ox:sx,0] = image[:,0] # x axis line + newimg[0,oy:sy] = image[0,:] # y axis line + + return newimg + +def counting(array): + factor = (2, 2, 1) + shape = array.shape + + while len(shape) < 4: + array = np.expand_dims(array, axis=-1) + shape = array.shape + + output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(shape, factor)) + output = np.zeros(output_shape, dtype=array.dtype) + + for chan in range(0, shape[3]): + for z in range(0, shape[2]): + for x in range(0, shape[0], 2): + for y in range(0, shape[1], 2): + block = array[ x:x+2, y:y+2, z, chan ] # 2x2 block + + hashtable = defaultdict(int) + for subx, suby in np.ndindex(block.shape[0], block.shape[1]): + hashtable[block[subx, suby]] += 1 + + best = (0, 0) + for segid, val in six.iteritems(hashtable): + if best[1] < val: + best = (segid, val) + + output[ x // 2, y // 2, chan ] = best[0] + + return output + +def ndzoom(array): + if len(array.shape) == 3: + ratio = ( 1 / 2.0, 1 / 2.0, 1.0 ) + else: + ratio = ( 1 / 2.0, 1 / 2.0) + return ndimage.interpolation.zoom(array, ratio, order=1) + +def countless_if(array): + factor = (2, 2, 1) + shape = array.shape + + if len(shape) < 3: + array = array[ :,:, np.newaxis ] + shape = array.shape + + output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(shape, factor)) + output = np.zeros(output_shape, dtype=array.dtype) + + for chan in range(0, shape[2]): + for x in range(0, shape[0], 2): + for y in range(0, shape[1], 2): + block = array[ x:x+2, y:y+2, chan ] # 2x2 block + + if block[0,0] == block[1,0]: + pick = block[0,0] + elif block[0,0] == block[0,1]: + pick = block[0,0] + elif block[1,0] == block[0,1]: + pick = block[1,0] + else: + pick = block[1,1] + + output[ x 
// 2, y // 2, chan ] = pick + + return np.squeeze(output) + +def downsample_with_averaging(array): + """ + Downsample x by factor using averaging. + + @return: The downsampled array, of the same type as x. + """ + + if len(array.shape) == 3: + factor = (2,2,1) + else: + factor = (2,2) + + if np.array_equal(factor[:3], np.array([1,1,1])): + return array + + output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor)) + temp = np.zeros(output_shape, float) + counts = np.zeros(output_shape, np.int) + for offset in np.ndindex(factor): + part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + indexing_expr = tuple(np.s_[:s] for s in part.shape) + temp[indexing_expr] += part + counts[indexing_expr] += 1 + return np.cast[array.dtype](temp / counts) + +def downsample_with_max_pooling(array): + + factor = (2,2) + + if np.all(np.array(factor, int) == 1): + return array + + sections = [] + + for offset in np.ndindex(factor): + part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + output = sections[0].copy() + + for section in sections[1:]: + np.maximum(output, section, output) + + return output + +def striding(array): + """Downsample x by factor using striding. + + @return: The downsampled array, of the same type as x. + """ + factor = (2,2) + if np.all(np.array(factor, int) == 1): + return array + return array[tuple(np.s_[::f] for f in factor)] + +def benchmark(): + filename = sys.argv[1] + img = Image.open(filename) + data = np.array(img.getdata(), dtype=np.uint8) + + if len(data.shape) == 1: + n_channels = 1 + reshape = (img.height, img.width) + else: + n_channels = min(data.shape[1], 3) + data = data[:, :n_channels] + reshape = (img.height, img.width, n_channels) + + data = data.reshape(reshape).astype(np.uint8) + + methods = [ + simplest_countless, + quick_countless, + quick_countless_xor, + quickest_countless, + stippled_countless, + zero_corrected_countless, + countless, + downsample_with_averaging, + downsample_with_max_pooling, + ndzoom, + striding, + # countless_if, + # counting, + ] + + formats = { + 1: 'L', + 3: 'RGB', + 4: 'RGBA' + } + + if not os.path.exists('./results'): + os.mkdir('./results') + + N = 500 + img_size = float(img.width * img.height) / 1024.0 / 1024.0 + print("N = %d, %dx%d (%.2f MPx) %d chan, %s" % (N, img.width, img.height, img_size, n_channels, filename)) + print("Algorithm\tMPx/sec\tMB/sec\tSec") + for fn in methods: + print(fn.__name__, end='') + sys.stdout.flush() + + start = time.time() + # tqdm is here to show you what's going on the first time you run it. + # Feel free to remove it to get slightly more accurate timing results. 
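+ # Each algorithm is timed over N repeated runs on the same image; throughput
+ # is then reported in MPx/sec and MB/sec alongside the total elapsed time.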
+ for _ in tqdm(range(N), desc=fn.__name__, disable=True): + result = fn(data) + end = time.time() + print("\r", end='') + + total_time = (end - start) + mpx = N * img_size / total_time + mbytes = N * img_size * n_channels / total_time + # Output in tab separated format to enable copy-paste into excel/numbers + print("%s\t%.3f\t%.3f\t%.2f" % (fn.__name__, mpx, mbytes, total_time)) + outimg = Image.fromarray(np.squeeze(result), formats[n_channels]) + outimg.save('./results/{}.png'.format(fn.__name__, "PNG")) + +if __name__ == '__main__': + benchmark() + + +# Example results: +# N = 5, 1024x1024 (1.00 MPx) 1 chan, images/gray_segmentation.png +# Function MPx/sec MB/sec Sec +# simplest_countless 752.855 752.855 0.01 +# quick_countless 920.328 920.328 0.01 +# zero_corrected_countless 534.143 534.143 0.01 +# countless 644.247 644.247 0.01 +# downsample_with_averaging 372.575 372.575 0.01 +# downsample_with_max_pooling 974.060 974.060 0.01 +# ndzoom 137.517 137.517 0.04 +# striding 38550.588 38550.588 0.00 +# countless_if 4.377 4.377 1.14 +# counting 0.117 0.117 42.85 + +# Run without non-numpy implementations: +# N = 2000, 1024x1024 (1.00 MPx) 1 chan, images/gray_segmentation.png +# Algorithm MPx/sec MB/sec Sec +# simplest_countless 800.522 800.522 2.50 +# quick_countless 945.420 945.420 2.12 +# quickest_countless 947.256 947.256 2.11 +# stippled_countless 544.049 544.049 3.68 +# zero_corrected_countless 575.310 575.310 3.48 +# countless 646.684 646.684 3.09 +# downsample_with_averaging 385.132 385.132 5.19 +# downsample_with_max_poolin 988.361 988.361 2.02 +# ndzoom 163.104 163.104 12.26 +# striding 81589.340 81589.340 0.02 + + + + diff --git a/lama/saicinpainting/evaluation/masks/countless/countless3d.py b/lama/saicinpainting/evaluation/masks/countless/countless3d.py new file mode 100644 index 0000000000000000000000000000000000000000..810a71e4b1fa344dd2d731186516dbfa96c9cd03 --- /dev/null +++ b/lama/saicinpainting/evaluation/masks/countless/countless3d.py @@ -0,0 +1,356 @@ +from six.moves import range +from PIL import Image +import numpy as np +import io +import time +import math +import random +import sys +from collections import defaultdict +from copy import deepcopy +from itertools import combinations +from functools import reduce +from tqdm import tqdm + +from memory_profiler import profile + +def countless5(a,b,c,d,e): + """First stage of generalizing from countless2d. + + You have five slots: A, B, C, D, E + + You can decide if something is the winner by first checking for + matches of three, then matches of two, then picking just one if + the other two tries fail. In countless2d, you just check for matches + of two and then pick one of them otherwise. + + Unfortunately, you need to check ABC, ABD, ABE, BCD, BDE, & CDE. + Then you need to check AB, AC, AD, BC, BD + We skip checking E because if none of these match, we pick E. We can + skip checking AE, BE, CE, DE since if any of those match, E is our boy + so it's redundant. + + So countless grows cominatorially in complexity. 
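+ For a block of N samples this means testing on the order of C(N, k)
+ combinations for every k from 2 up to the majority size, so the generalized
+ variants below are only practical for small downsampling factors.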
+ """ + sections = [ a,b,c,d,e ] + + p2 = lambda q,r: q * (q == r) # q if p == q else 0 + p3 = lambda q,r,s: q * ( (q == r) & (r == s) ) # q if q == r == s else 0 + + lor = lambda x,y: x + (x == 0) * y + + results3 = ( p3(x,y,z) for x,y,z in combinations(sections, 3) ) + results3 = reduce(lor, results3) + + results2 = ( p2(x,y) for x,y in combinations(sections[:-1], 2) ) + results2 = reduce(lor, results2) + + return reduce(lor, (results3, results2, e)) + +def countless8(a,b,c,d,e,f,g,h): + """Extend countless5 to countless8. Same deal, except we also + need to check for matches of length 4.""" + sections = [ a, b, c, d, e, f, g, h ] + + p2 = lambda q,r: q * (q == r) + p3 = lambda q,r,s: q * ( (q == r) & (r == s) ) + p4 = lambda p,q,r,s: p * ( (p == q) & (q == r) & (r == s) ) + + lor = lambda x,y: x + (x == 0) * y + + results4 = ( p4(x,y,z,w) for x,y,z,w in combinations(sections, 4) ) + results4 = reduce(lor, results4) + + results3 = ( p3(x,y,z) for x,y,z in combinations(sections, 3) ) + results3 = reduce(lor, results3) + + # We can always use our shortcut of omitting the last element + # for N choose 2 + results2 = ( p2(x,y) for x,y in combinations(sections[:-1], 2) ) + results2 = reduce(lor, results2) + + return reduce(lor, [ results4, results3, results2, h ]) + +def dynamic_countless3d(data): + """countless8 + dynamic programming. ~2x faster""" + sections = [] + + # shift zeros up one so they don't interfere with bitwise operators + # we'll shift down at the end + data += 1 + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. + factor = (2,2,2) + for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + pick = lambda a,b: a * (a == b) + lor = lambda x,y: x + (x == 0) * y + + subproblems2 = {} + + results2 = None + for x,y in combinations(range(7), 2): + res = pick(sections[x], sections[y]) + subproblems2[(x,y)] = res + if results2 is not None: + results2 += (results2 == 0) * res + else: + results2 = res + + subproblems3 = {} + + results3 = None + for x,y,z in combinations(range(8), 3): + res = pick(subproblems2[(x,y)], sections[z]) + + if z != 7: + subproblems3[(x,y,z)] = res + + if results3 is not None: + results3 += (results3 == 0) * res + else: + results3 = res + + results3 = reduce(lor, (results3, results2, sections[-1])) + + # free memory + results2 = None + subproblems2 = None + res = None + + results4 = ( pick(subproblems3[(x,y,z)], sections[w]) for x,y,z,w in combinations(range(8), 4) ) + results4 = reduce(lor, results4) + subproblems3 = None # free memory + + final_result = lor(results4, results3) - 1 + data -= 1 + return final_result + +def countless3d(data): + """Now write countless8 in such a way that it could be used + to process an image.""" + sections = [] + + # shift zeros up one so they don't interfere with bitwise operators + # we'll shift down at the end + data += 1 + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. 
+ factor = (2,2,2) + for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + p2 = lambda q,r: q * (q == r) + p3 = lambda q,r,s: q * ( (q == r) & (r == s) ) + p4 = lambda p,q,r,s: p * ( (p == q) & (q == r) & (r == s) ) + + lor = lambda x,y: x + (x == 0) * y + + results4 = ( p4(x,y,z,w) for x,y,z,w in combinations(sections, 4) ) + results4 = reduce(lor, results4) + + results3 = ( p3(x,y,z) for x,y,z in combinations(sections, 3) ) + results3 = reduce(lor, results3) + + results2 = ( p2(x,y) for x,y in combinations(sections[:-1], 2) ) + results2 = reduce(lor, results2) + + final_result = reduce(lor, (results4, results3, results2, sections[-1])) - 1 + data -= 1 + return final_result + +def countless_generalized(data, factor): + assert len(data.shape) == len(factor) + + sections = [] + + mode_of = reduce(lambda x,y: x * y, factor) + majority = int(math.ceil(float(mode_of) / 2)) + + data += 1 + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. + for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + def pick(elements): + eq = ( elements[i] == elements[i+1] for i in range(len(elements) - 1) ) + anded = reduce(lambda p,q: p & q, eq) + return elements[0] * anded + + def logical_or(x,y): + return x + (x == 0) * y + + result = ( pick(combo) for combo in combinations(sections, majority) ) + result = reduce(logical_or, result) + for i in range(majority - 1, 3-1, -1): # 3-1 b/c of exclusive bounds + partial_result = ( pick(combo) for combo in combinations(sections, i) ) + partial_result = reduce(logical_or, partial_result) + result = logical_or(result, partial_result) + + partial_result = ( pick(combo) for combo in combinations(sections[:-1], 2) ) + partial_result = reduce(logical_or, partial_result) + result = logical_or(result, partial_result) + + result = logical_or(result, sections[-1]) - 1 + data -= 1 + return result + +def dynamic_countless_generalized(data, factor): + assert len(data.shape) == len(factor) + + sections = [] + + mode_of = reduce(lambda x,y: x * y, factor) + majority = int(math.ceil(float(mode_of) / 2)) + + data += 1 # offset from zero + + # This loop splits the 2D array apart into four arrays that are + # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), + # and (1,1) representing the A, B, C, and D positions from Figure 1. 
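+ # Dynamic-programming variant: picks for smaller matches are cached in
+ # `subproblems` and extended one section at a time, so larger matches reuse
+ # previous results instead of being recomputed from scratch.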
+ for offset in np.ndindex(factor): + part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + pick = lambda a,b: a * (a == b) + lor = lambda x,y: x + (x == 0) * y # logical or + + subproblems = [ {}, {} ] + results2 = None + for x,y in combinations(range(len(sections) - 1), 2): + res = pick(sections[x], sections[y]) + subproblems[0][(x,y)] = res + if results2 is not None: + results2 = lor(results2, res) + else: + results2 = res + + results = [ results2 ] + for r in range(3, majority+1): + r_results = None + for combo in combinations(range(len(sections)), r): + res = pick(subproblems[0][combo[:-1]], sections[combo[-1]]) + + if combo[-1] != len(sections) - 1: + subproblems[1][combo] = res + + if r_results is not None: + r_results = lor(r_results, res) + else: + r_results = res + results.append(r_results) + subproblems[0] = subproblems[1] + subproblems[1] = {} + + results.reverse() + final_result = lor(reduce(lor, results), sections[-1]) - 1 + data -= 1 + return final_result + +def downsample_with_averaging(array): + """ + Downsample x by factor using averaging. + + @return: The downsampled array, of the same type as x. + """ + factor = (2,2,2) + + if np.array_equal(factor[:3], np.array([1,1,1])): + return array + + output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor)) + temp = np.zeros(output_shape, float) + counts = np.zeros(output_shape, np.int) + for offset in np.ndindex(factor): + part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + indexing_expr = tuple(np.s_[:s] for s in part.shape) + temp[indexing_expr] += part + counts[indexing_expr] += 1 + return np.cast[array.dtype](temp / counts) + +def downsample_with_max_pooling(array): + + factor = (2,2,2) + + sections = [] + + for offset in np.ndindex(factor): + part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))] + sections.append(part) + + output = sections[0].copy() + + for section in sections[1:]: + np.maximum(output, section, output) + + return output + +def striding(array): + """Downsample x by factor using striding. + + @return: The downsampled array, of the same type as x. 
+ """ + factor = (2,2,2) + if np.all(np.array(factor, int) == 1): + return array + return array[tuple(np.s_[::f] for f in factor)] + +def benchmark(): + def countless3d_generalized(img): + return countless_generalized(img, (2,8,1)) + def countless3d_dynamic_generalized(img): + return dynamic_countless_generalized(img, (8,8,1)) + + methods = [ + # countless3d, + # dynamic_countless3d, + countless3d_generalized, + # countless3d_dynamic_generalized, + # striding, + # downsample_with_averaging, + # downsample_with_max_pooling + ] + + data = np.zeros(shape=(16**2, 16**2, 16**2), dtype=np.uint8) + 1 + + N = 5 + + print('Algorithm\tMPx\tMB/sec\tSec\tN=%d' % N) + + for fn in methods: + start = time.time() + for _ in range(N): + result = fn(data) + end = time.time() + + total_time = (end - start) + mpx = N * float(data.shape[0] * data.shape[1] * data.shape[2]) / total_time / 1024.0 / 1024.0 + mbytes = mpx * np.dtype(data.dtype).itemsize + # Output in tab separated format to enable copy-paste into excel/numbers + print("%s\t%.3f\t%.3f\t%.2f" % (fn.__name__, mpx, mbytes, total_time)) + +if __name__ == '__main__': + benchmark() + +# Algorithm MPx MB/sec Sec N=5 +# countless3d 10.564 10.564 60.58 +# dynamic_countless3d 22.717 22.717 28.17 +# countless3d_generalized 9.702 9.702 65.96 +# countless3d_dynamic_generalized 22.720 22.720 28.17 +# striding 253360.506 253360.506 0.00 +# downsample_with_averaging 224.098 224.098 2.86 +# downsample_with_max_pooling 690.474 690.474 0.93 + + + diff --git a/lama/saicinpainting/evaluation/masks/countless/memprof/countless2d_gcim_N_1000.png b/lama/saicinpainting/evaluation/masks/countless/memprof/countless2d_gcim_N_1000.png new file mode 100644 index 0000000000000000000000000000000000000000..557eca7295f50ac9398165b5da873eeb06d10e5c Binary files /dev/null and b/lama/saicinpainting/evaluation/masks/countless/memprof/countless2d_gcim_N_1000.png differ diff --git a/lama/saicinpainting/evaluation/masks/countless/memprof/countless2d_quick_gcim_N_1000.png b/lama/saicinpainting/evaluation/masks/countless/memprof/countless2d_quick_gcim_N_1000.png new file mode 100644 index 0000000000000000000000000000000000000000..2121cef5c7376a47fda376a22832d3e8b9e6ff91 Binary files /dev/null and b/lama/saicinpainting/evaluation/masks/countless/memprof/countless2d_quick_gcim_N_1000.png differ diff --git a/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d.png b/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d.png new file mode 100644 index 0000000000000000000000000000000000000000..5b4bf5d5fc400ce25388cc189fd18d61b82a5fd5 Binary files /dev/null and b/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d.png differ diff --git a/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d_dynamic.png b/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d_dynamic.png new file mode 100644 index 0000000000000000000000000000000000000000..91bcb420c88e1cad2c9a3152495211e018585aa4 Binary files /dev/null and b/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d_dynamic.png differ diff --git a/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d_dynamic_generalized.png b/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d_dynamic_generalized.png new file mode 100644 index 0000000000000000000000000000000000000000..5c6137442d6027a99ee7e3d1ba92a7bfbd49dffc Binary files /dev/null and b/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d_dynamic_generalized.png differ diff --git 
a/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d_generalized.png b/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d_generalized.png new file mode 100644 index 0000000000000000000000000000000000000000..9193f641f493ae085d226aa3f3468089e1f686ea Binary files /dev/null and b/lama/saicinpainting/evaluation/masks/countless/memprof/countless3d_generalized.png differ diff --git a/lama/saicinpainting/evaluation/masks/countless/requirements.txt b/lama/saicinpainting/evaluation/masks/countless/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..cbf8c87bf9b4c9fe54cb39d722253c0ab59e63ad --- /dev/null +++ b/lama/saicinpainting/evaluation/masks/countless/requirements.txt @@ -0,0 +1,7 @@ +Pillow>=6.2.0 +numpy>=1.16 +scipy +tqdm +memory_profiler +six +pytest \ No newline at end of file diff --git a/lama/saicinpainting/evaluation/masks/countless/test.py b/lama/saicinpainting/evaluation/masks/countless/test.py new file mode 100644 index 0000000000000000000000000000000000000000..7809beb7aeeb3bcb10d03093a564917b1f2b4786 --- /dev/null +++ b/lama/saicinpainting/evaluation/masks/countless/test.py @@ -0,0 +1,195 @@ +from copy import deepcopy + +import numpy as np + +import countless2d +import countless3d + +def test_countless2d(): + def test_all_cases(fn, test_zero): + case1 = np.array([ [ 1, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) # all different + case2 = np.array([ [ 1, 1 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # two are same + case1z = np.array([ [ 0, 1 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # all different + case2z = np.array([ [ 0, 0 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # two are same + case3 = np.array([ [ 1, 1 ], [ 2, 2 ] ]).reshape((2,2,1,1)) # two groups are same + case4 = np.array([ [ 1, 2 ], [ 2, 2 ] ]).reshape((2,2,1,1)) # 3 are the same + case5 = np.array([ [ 5, 5 ], [ 5, 5 ] ]).reshape((2,2,1,1)) # all are the same + + is_255_handled = np.array([ [ 255, 255 ], [ 1, 2 ] ], dtype=np.uint8).reshape((2,2,1,1)) + + test = lambda case: fn(case) + + if test_zero: + assert test(case1z) == [[[[3]]]] # d + assert test(case2z) == [[[[0]]]] # a==b + else: + assert test(case1) == [[[[4]]]] # d + assert test(case2) == [[[[1]]]] # a==b + + assert test(case3) == [[[[1]]]] # a==b + assert test(case4) == [[[[2]]]] # b==c + assert test(case5) == [[[[5]]]] # a==b + + assert test(is_255_handled) == [[[[255]]]] + + assert fn(case1).dtype == case1.dtype + + test_all_cases(countless2d.simplest_countless, False) + test_all_cases(countless2d.quick_countless, False) + test_all_cases(countless2d.quickest_countless, False) + test_all_cases(countless2d.stippled_countless, False) + + + + methods = [ + countless2d.zero_corrected_countless, + countless2d.countless, + countless2d.countless_if, + # countless2d.counting, # counting doesn't respect order so harder to write a test + ] + + for fn in methods: + print(fn.__name__) + test_all_cases(fn, True) + +def test_stippled_countless2d(): + a = np.array([ [ 1, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) + b = np.array([ [ 0, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) + c = np.array([ [ 1, 0 ], [ 3, 4 ] ]).reshape((2,2,1,1)) + d = np.array([ [ 1, 2 ], [ 0, 4 ] ]).reshape((2,2,1,1)) + e = np.array([ [ 1, 2 ], [ 3, 0 ] ]).reshape((2,2,1,1)) + f = np.array([ [ 0, 0 ], [ 3, 4 ] ]).reshape((2,2,1,1)) + g = np.array([ [ 0, 2 ], [ 0, 4 ] ]).reshape((2,2,1,1)) + h = np.array([ [ 0, 2 ], [ 3, 0 ] ]).reshape((2,2,1,1)) + i = np.array([ [ 1, 0 ], [ 0, 4 ] ]).reshape((2,2,1,1)) + j = np.array([ [ 1, 2 ], [ 0, 0 ] ]).reshape((2,2,1,1)) + k = np.array([ [ 1, 0 ], 
[ 3, 0 ] ]).reshape((2,2,1,1)) + l = np.array([ [ 1, 0 ], [ 0, 0 ] ]).reshape((2,2,1,1)) + m = np.array([ [ 0, 2 ], [ 0, 0 ] ]).reshape((2,2,1,1)) + n = np.array([ [ 0, 0 ], [ 3, 0 ] ]).reshape((2,2,1,1)) + o = np.array([ [ 0, 0 ], [ 0, 4 ] ]).reshape((2,2,1,1)) + z = np.array([ [ 0, 0 ], [ 0, 0 ] ]).reshape((2,2,1,1)) + + test = countless2d.stippled_countless + + # Note: We only tested non-matching cases above, + # cases f,g,h,i,j,k prove their duals work as well + # b/c if two pixels are black, either one can be chosen + # if they are different or the same. + + assert test(a) == [[[[4]]]] + assert test(b) == [[[[4]]]] + assert test(c) == [[[[4]]]] + assert test(d) == [[[[4]]]] + assert test(e) == [[[[1]]]] + assert test(f) == [[[[4]]]] + assert test(g) == [[[[4]]]] + assert test(h) == [[[[2]]]] + assert test(i) == [[[[4]]]] + assert test(j) == [[[[1]]]] + assert test(k) == [[[[1]]]] + assert test(l) == [[[[1]]]] + assert test(m) == [[[[2]]]] + assert test(n) == [[[[3]]]] + assert test(o) == [[[[4]]]] + assert test(z) == [[[[0]]]] + + bc = np.array([ [ 0, 2 ], [ 2, 4 ] ]).reshape((2,2,1,1)) + bd = np.array([ [ 0, 2 ], [ 3, 2 ] ]).reshape((2,2,1,1)) + cd = np.array([ [ 0, 2 ], [ 3, 3 ] ]).reshape((2,2,1,1)) + + assert test(bc) == [[[[2]]]] + assert test(bd) == [[[[2]]]] + assert test(cd) == [[[[3]]]] + + ab = np.array([ [ 1, 1 ], [ 0, 4 ] ]).reshape((2,2,1,1)) + ac = np.array([ [ 1, 2 ], [ 1, 0 ] ]).reshape((2,2,1,1)) + ad = np.array([ [ 1, 0 ], [ 3, 1 ] ]).reshape((2,2,1,1)) + + assert test(ab) == [[[[1]]]] + assert test(ac) == [[[[1]]]] + assert test(ad) == [[[[1]]]] + +def test_countless3d(): + def test_all_cases(fn): + alldifferent = [ + [ + [1,2], + [3,4], + ], + [ + [5,6], + [7,8] + ] + ] + allsame = [ + [ + [1,1], + [1,1], + ], + [ + [1,1], + [1,1] + ] + ] + + assert fn(np.array(alldifferent)) == [[[8]]] + assert fn(np.array(allsame)) == [[[1]]] + + twosame = deepcopy(alldifferent) + twosame[1][1][0] = 2 + + assert fn(np.array(twosame)) == [[[2]]] + + threemixed = [ + [ + [3,3], + [1,2], + ], + [ + [2,4], + [4,3] + ] + ] + assert fn(np.array(threemixed)) == [[[3]]] + + foursame = [ + [ + [4,4], + [1,2], + ], + [ + [2,4], + [4,3] + ] + ] + + assert fn(np.array(foursame)) == [[[4]]] + + fivesame = [ + [ + [5,4], + [5,5], + ], + [ + [2,4], + [5,5] + ] + ] + + assert fn(np.array(fivesame)) == [[[5]]] + + def countless3d_generalized(img): + return countless3d.countless_generalized(img, (2,2,2)) + def countless3d_dynamic_generalized(img): + return countless3d.dynamic_countless_generalized(img, (2,2,2)) + + methods = [ + countless3d.countless3d, + countless3d.dynamic_countless3d, + countless3d_generalized, + countless3d_dynamic_generalized, + ] + + for fn in methods: + test_all_cases(fn) \ No newline at end of file diff --git a/lama/saicinpainting/evaluation/masks/mask.py b/lama/saicinpainting/evaluation/masks/mask.py new file mode 100644 index 0000000000000000000000000000000000000000..3e34d0675a781fba983cb542f18390255aaf2609 --- /dev/null +++ b/lama/saicinpainting/evaluation/masks/mask.py @@ -0,0 +1,429 @@ +import enum +from copy import deepcopy + +import numpy as np +from skimage import img_as_ubyte +from skimage.transform import rescale, resize +try: + from detectron2 import model_zoo + from detectron2.config import get_cfg + from detectron2.engine import DefaultPredictor + DETECTRON_INSTALLED = True +except: + print("Detectron v2 is not installed") + DETECTRON_INSTALLED = False + +from .countless.countless2d import zero_corrected_countless + + +class ObjectMask(): + def __init__(self, 
mask): + self.height, self.width = mask.shape + (self.up, self.down), (self.left, self.right) = self._get_limits(mask) + self.mask = mask[self.up:self.down, self.left:self.right].copy() + + @staticmethod + def _get_limits(mask): + def indicator_limits(indicator): + lower = indicator.argmax() + upper = len(indicator) - indicator[::-1].argmax() + return lower, upper + + vertical_indicator = mask.any(axis=1) + vertical_limits = indicator_limits(vertical_indicator) + + horizontal_indicator = mask.any(axis=0) + horizontal_limits = indicator_limits(horizontal_indicator) + + return vertical_limits, horizontal_limits + + def _clean(self): + self.up, self.down, self.left, self.right = 0, 0, 0, 0 + self.mask = np.empty((0, 0)) + + def horizontal_flip(self, inplace=False): + if not inplace: + flipped = deepcopy(self) + return flipped.horizontal_flip(inplace=True) + + self.mask = self.mask[:, ::-1] + return self + + def vertical_flip(self, inplace=False): + if not inplace: + flipped = deepcopy(self) + return flipped.vertical_flip(inplace=True) + + self.mask = self.mask[::-1, :] + return self + + def image_center(self): + y_center = self.up + (self.down - self.up) / 2 + x_center = self.left + (self.right - self.left) / 2 + return y_center, x_center + + def rescale(self, scaling_factor, inplace=False): + if not inplace: + scaled = deepcopy(self) + return scaled.rescale(scaling_factor, inplace=True) + + scaled_mask = rescale(self.mask.astype(float), scaling_factor, order=0) > 0.5 + (up, down), (left, right) = self._get_limits(scaled_mask) + self.mask = scaled_mask[up:down, left:right] + + y_center, x_center = self.image_center() + mask_height, mask_width = self.mask.shape + self.up = int(round(y_center - mask_height / 2)) + self.down = self.up + mask_height + self.left = int(round(x_center - mask_width / 2)) + self.right = self.left + mask_width + return self + + def crop_to_canvas(self, vertical=True, horizontal=True, inplace=False): + if not inplace: + cropped = deepcopy(self) + cropped.crop_to_canvas(vertical=vertical, horizontal=horizontal, inplace=True) + return cropped + + if vertical: + if self.up >= self.height or self.down <= 0: + self._clean() + else: + cut_up, cut_down = max(-self.up, 0), max(self.down - self.height, 0) + if cut_up != 0: + self.mask = self.mask[cut_up:] + self.up = 0 + if cut_down != 0: + self.mask = self.mask[:-cut_down] + self.down = self.height + + if horizontal: + if self.left >= self.width or self.right <= 0: + self._clean() + else: + cut_left, cut_right = max(-self.left, 0), max(self.right - self.width, 0) + if cut_left != 0: + self.mask = self.mask[:, cut_left:] + self.left = 0 + if cut_right != 0: + self.mask = self.mask[:, :-cut_right] + self.right = self.width + + return self + + def restore_full_mask(self, allow_crop=False): + cropped = self.crop_to_canvas(inplace=allow_crop) + mask = np.zeros((cropped.height, cropped.width), dtype=bool) + mask[cropped.up:cropped.down, cropped.left:cropped.right] = cropped.mask + return mask + + def shift(self, vertical=0, horizontal=0, inplace=False): + if not inplace: + shifted = deepcopy(self) + return shifted.shift(vertical=vertical, horizontal=horizontal, inplace=True) + + self.up += vertical + self.down += vertical + self.left += horizontal + self.right += horizontal + return self + + def area(self): + return self.mask.sum() + + +class RigidnessMode(enum.Enum): + soft = 0 + rigid = 1 + + +class SegmentationMask: + def __init__(self, confidence_threshold=0.5, rigidness_mode=RigidnessMode.rigid, + max_object_area=0.3, 
min_mask_area=0.02, downsample_levels=6, num_variants_per_mask=4, + max_mask_intersection=0.5, max_foreground_coverage=0.5, max_foreground_intersection=0.5, + max_hidden_area=0.2, max_scale_change=0.25, horizontal_flip=True, + max_vertical_shift=0.1, position_shuffle=True): + """ + :param confidence_threshold: float; threshold for confidence of the panoptic segmentator to allow for + the instance. + :param rigidness_mode: RigidnessMode object + when soft, checks intersection only with the object from which the mask_object was produced + when rigid, checks intersection with any foreground class object + :param max_object_area: float; allowed upper bound for to be considered as mask_object. + :param min_mask_area: float; lower bound for mask to be considered valid + :param downsample_levels: int; defines width of the resized segmentation to obtain shifted masks; + :param num_variants_per_mask: int; maximal number of the masks for the same object; + :param max_mask_intersection: float; maximum allowed area fraction of intersection for 2 masks + produced by horizontal shift of the same mask_object; higher value -> more diversity + :param max_foreground_coverage: float; maximum allowed area fraction of intersection for foreground object to be + covered by mask; lower value -> less the objects are covered + :param max_foreground_intersection: float; maximum allowed area of intersection for the mask with foreground + object; lower value -> mask is more on the background than on the objects + :param max_hidden_area: upper bound on part of the object hidden by shifting object outside the screen area; + :param max_scale_change: allowed scale change for the mask_object; + :param horizontal_flip: if horizontal flips are allowed; + :param max_vertical_shift: amount of vertical movement allowed; + :param position_shuffle: shuffle + """ + + assert DETECTRON_INSTALLED, 'Cannot use SegmentationMask without detectron2' + self.cfg = get_cfg() + self.cfg.merge_from_file(model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")) + self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml") + self.cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence_threshold + self.predictor = DefaultPredictor(self.cfg) + + self.rigidness_mode = RigidnessMode(rigidness_mode) + self.max_object_area = max_object_area + self.min_mask_area = min_mask_area + self.downsample_levels = downsample_levels + self.num_variants_per_mask = num_variants_per_mask + self.max_mask_intersection = max_mask_intersection + self.max_foreground_coverage = max_foreground_coverage + self.max_foreground_intersection = max_foreground_intersection + self.max_hidden_area = max_hidden_area + self.position_shuffle = position_shuffle + + self.max_scale_change = max_scale_change + self.horizontal_flip = horizontal_flip + self.max_vertical_shift = max_vertical_shift + + def get_segmentation(self, img): + im = img_as_ubyte(img) + panoptic_seg, segment_info = self.predictor(im)["panoptic_seg"] + return panoptic_seg, segment_info + + @staticmethod + def _is_power_of_two(n): + return (n != 0) and (n & (n-1) == 0) + + def identify_candidates(self, panoptic_seg, segments_info): + potential_mask_ids = [] + for segment in segments_info: + if not segment["isthing"]: + continue + mask = (panoptic_seg == segment["id"]).int().detach().cpu().numpy() + area = mask.sum().item() / np.prod(panoptic_seg.shape) + if area >= self.max_object_area: + continue + 
potential_mask_ids.append(segment["id"]) + return potential_mask_ids + + def downsample_mask(self, mask): + height, width = mask.shape + if not (self._is_power_of_two(height) and self._is_power_of_two(width)): + raise ValueError("Image sides are not power of 2.") + + num_iterations = width.bit_length() - 1 - self.downsample_levels + if num_iterations < 0: + raise ValueError(f"Width is lower than 2^{self.downsample_levels}.") + + if height.bit_length() - 1 < num_iterations: + raise ValueError("Height is too low to perform downsampling") + + downsampled = mask + for _ in range(num_iterations): + downsampled = zero_corrected_countless(downsampled) + + return downsampled + + def _augmentation_params(self): + scaling_factor = np.random.uniform(1 - self.max_scale_change, 1 + self.max_scale_change) + if self.horizontal_flip: + horizontal_flip = bool(np.random.choice(2)) + else: + horizontal_flip = False + vertical_shift = np.random.uniform(-self.max_vertical_shift, self.max_vertical_shift) + + return { + "scaling_factor": scaling_factor, + "horizontal_flip": horizontal_flip, + "vertical_shift": vertical_shift + } + + def _get_intersection(self, mask_array, mask_object): + intersection = mask_array[ + mask_object.up:mask_object.down, mask_object.left:mask_object.right + ] & mask_object.mask + return intersection + + def _check_masks_intersection(self, aug_mask, total_mask_area, prev_masks): + for existing_mask in prev_masks: + intersection_area = self._get_intersection(existing_mask, aug_mask).sum() + intersection_existing = intersection_area / existing_mask.sum() + intersection_current = 1 - (aug_mask.area() - intersection_area) / total_mask_area + if (intersection_existing > self.max_mask_intersection) or \ + (intersection_current > self.max_mask_intersection): + return False + return True + + def _check_foreground_intersection(self, aug_mask, foreground): + for existing_mask in foreground: + intersection_area = self._get_intersection(existing_mask, aug_mask).sum() + intersection_existing = intersection_area / existing_mask.sum() + if intersection_existing > self.max_foreground_coverage: + return False + intersection_mask = intersection_area / aug_mask.area() + if intersection_mask > self.max_foreground_intersection: + return False + return True + + def _move_mask(self, mask, foreground): + # Obtaining properties of the original mask_object: + orig_mask = ObjectMask(mask) + + chosen_masks = [] + chosen_parameters = [] + # to fix the case when resizing gives mask_object consisting only of False + scaling_factor_lower_bound = 0. + + for var_idx in range(self.num_variants_per_mask): + # Obtaining augmentation parameters and applying them to the downscaled mask_object + augmentation_params = self._augmentation_params() + augmentation_params["scaling_factor"] = min([ + augmentation_params["scaling_factor"], + 2 * min(orig_mask.up, orig_mask.height - orig_mask.down) / orig_mask.height + 1., + 2 * min(orig_mask.left, orig_mask.width - orig_mask.right) / orig_mask.width + 1. + ]) + augmentation_params["scaling_factor"] = max([ + augmentation_params["scaling_factor"], scaling_factor_lower_bound + ]) + + aug_mask = deepcopy(orig_mask) + aug_mask.rescale(augmentation_params["scaling_factor"], inplace=True) + if augmentation_params["horizontal_flip"]: + aug_mask.horizontal_flip(inplace=True) + total_aug_area = aug_mask.area() + if total_aug_area == 0: + scaling_factor_lower_bound = 1. 
+ continue + + # Fix if the element vertical shift is too strong and shown area is too small: + vertical_area = aug_mask.mask.sum(axis=1) / total_aug_area # share of area taken by rows + # number of rows which are allowed to be hidden from upper and lower parts of image respectively + max_hidden_up = np.searchsorted(vertical_area.cumsum(), self.max_hidden_area) + max_hidden_down = np.searchsorted(vertical_area[::-1].cumsum(), self.max_hidden_area) + # correcting vertical shift, so not too much area will be hidden + augmentation_params["vertical_shift"] = np.clip( + augmentation_params["vertical_shift"], + -(aug_mask.up + max_hidden_up) / aug_mask.height, + (aug_mask.height - aug_mask.down + max_hidden_down) / aug_mask.height + ) + # Applying vertical shift: + vertical_shift = int(round(aug_mask.height * augmentation_params["vertical_shift"])) + aug_mask.shift(vertical=vertical_shift, inplace=True) + aug_mask.crop_to_canvas(vertical=True, horizontal=False, inplace=True) + + # Choosing horizontal shift: + max_hidden_area = self.max_hidden_area - (1 - aug_mask.area() / total_aug_area) + horizontal_area = aug_mask.mask.sum(axis=0) / total_aug_area + max_hidden_left = np.searchsorted(horizontal_area.cumsum(), max_hidden_area) + max_hidden_right = np.searchsorted(horizontal_area[::-1].cumsum(), max_hidden_area) + allowed_shifts = np.arange(-max_hidden_left, aug_mask.width - + (aug_mask.right - aug_mask.left) + max_hidden_right + 1) + allowed_shifts = - (aug_mask.left - allowed_shifts) + + if self.position_shuffle: + np.random.shuffle(allowed_shifts) + + mask_is_found = False + for horizontal_shift in allowed_shifts: + aug_mask_left = deepcopy(aug_mask) + aug_mask_left.shift(horizontal=horizontal_shift, inplace=True) + aug_mask_left.crop_to_canvas(inplace=True) + + prev_masks = [mask] + chosen_masks + is_mask_suitable = self._check_masks_intersection(aug_mask_left, total_aug_area, prev_masks) & \ + self._check_foreground_intersection(aug_mask_left, foreground) + if is_mask_suitable: + aug_draw = aug_mask_left.restore_full_mask() + chosen_masks.append(aug_draw) + augmentation_params["horizontal_shift"] = horizontal_shift / aug_mask_left.width + chosen_parameters.append(augmentation_params) + mask_is_found = True + break + + if not mask_is_found: + break + + return chosen_parameters + + def _prepare_mask(self, mask): + height, width = mask.shape + target_width = width if self._is_power_of_two(width) else (1 << width.bit_length()) + target_height = height if self._is_power_of_two(height) else (1 << height.bit_length()) + + return resize(mask.astype('float32'), (target_height, target_width), order=0, mode='edge').round().astype('int32') + + def get_masks(self, im, return_panoptic=False): + panoptic_seg, segments_info = self.get_segmentation(im) + potential_mask_ids = self.identify_candidates(panoptic_seg, segments_info) + + panoptic_seg_scaled = self._prepare_mask(panoptic_seg.detach().cpu().numpy()) + downsampled = self.downsample_mask(panoptic_seg_scaled) + scene_objects = [] + for segment in segments_info: + if not segment["isthing"]: + continue + mask = downsampled == segment["id"] + if not np.any(mask): + continue + scene_objects.append(mask) + + mask_set = [] + for mask_id in potential_mask_ids: + mask = downsampled == mask_id + if not np.any(mask): + continue + + if self.rigidness_mode is RigidnessMode.soft: + foreground = [mask] + elif self.rigidness_mode is RigidnessMode.rigid: + foreground = scene_objects + else: + raise ValueError(f'Unexpected rigidness_mode: {rigidness_mode}') + + 
masks_params = self._move_mask(mask, foreground) + + full_mask = ObjectMask((panoptic_seg == mask_id).detach().cpu().numpy()) + + for params in masks_params: + aug_mask = deepcopy(full_mask) + aug_mask.rescale(params["scaling_factor"], inplace=True) + if params["horizontal_flip"]: + aug_mask.horizontal_flip(inplace=True) + + vertical_shift = int(round(aug_mask.height * params["vertical_shift"])) + horizontal_shift = int(round(aug_mask.width * params["horizontal_shift"])) + aug_mask.shift(vertical=vertical_shift, horizontal=horizontal_shift, inplace=True) + aug_mask = aug_mask.restore_full_mask().astype('uint8') + if aug_mask.mean() <= self.min_mask_area: + continue + mask_set.append(aug_mask) + + if return_panoptic: + return mask_set, panoptic_seg.detach().cpu().numpy() + else: + return mask_set + + +def propose_random_square_crop(mask, min_overlap=0.5): + height, width = mask.shape + mask_ys, mask_xs = np.where(mask > 0.5) # mask==0 is known fragment and mask==1 is missing + + if height < width: + crop_size = height + obj_left, obj_right = mask_xs.min(), mask_xs.max() + obj_width = obj_right - obj_left + left_border = max(0, min(width - crop_size - 1, obj_left + obj_width * min_overlap - crop_size)) + right_border = max(left_border + 1, min(width - crop_size, obj_left + obj_width * min_overlap)) + start_x = np.random.randint(left_border, right_border) + return start_x, 0, start_x + crop_size, height + else: + crop_size = width + obj_top, obj_bottom = mask_ys.min(), mask_ys.max() + obj_height = obj_bottom - obj_top + top_border = max(0, min(height - crop_size - 1, obj_top + obj_height * min_overlap - crop_size)) + bottom_border = max(top_border + 1, min(height - crop_size, obj_top + obj_height * min_overlap)) + start_y = np.random.randint(top_border, bottom_border) + return 0, start_y, width, start_y + crop_size diff --git a/lama/saicinpainting/evaluation/refinement.py b/lama/saicinpainting/evaluation/refinement.py new file mode 100644 index 0000000000000000000000000000000000000000..72efe4e2435bff2e0b3bfe6750aa43bc4bbd0b84 --- /dev/null +++ b/lama/saicinpainting/evaluation/refinement.py @@ -0,0 +1,314 @@ +import torch +import torch.nn as nn +from torch.optim import Adam, SGD +from kornia.filters import gaussian_blur2d +from kornia.geometry.transform import resize +from kornia.morphology import erosion +from torch.nn import functional as F +import numpy as np +import cv2 + +from ...saicinpainting.evaluation.data import pad_tensor_to_modulo +from ...saicinpainting.evaluation.utils import move_to_device +from ...saicinpainting.training.modules.ffc import FFCResnetBlock +from ...saicinpainting.training.modules.pix2pixhd import ResnetBlock + +from tqdm import tqdm + + +def _pyrdown(im : torch.Tensor, downsize : tuple=None): + """downscale the image""" + if downsize is None: + downsize = (im.shape[2]//2, im.shape[3]//2) + assert im.shape[1] == 3, "Expected shape for the input to be (n,3,height,width)" + im = gaussian_blur2d(im, kernel_size=(5,5), sigma=(1.0,1.0)) + im = F.interpolate(im, size=downsize, mode='bilinear', align_corners=False) + return im + +def _pyrdown_mask(mask : torch.Tensor, downsize : tuple=None, eps : float=1e-8, blur_mask : bool=True, round_up : bool=True): + """downscale the mask tensor + + Parameters + ---------- + mask : torch.Tensor + mask of size (B, 1, H, W) + downsize : tuple, optional + size to downscale to. 
If None, image is downscaled to half, by default None + eps : float, optional + threshold value for binarizing the mask, by default 1e-8 + blur_mask : bool, optional + if True, apply gaussian filter before downscaling, by default True + round_up : bool, optional + if True, values above eps are marked 1, else, values below 1-eps are marked 0, by default True + + Returns + ------- + torch.Tensor + downscaled mask + """ + + if downsize is None: + downsize = (mask.shape[2]//2, mask.shape[3]//2) + assert mask.shape[1] == 1, "Expected shape for the input to be (n,1,height,width)" + if blur_mask == True: + mask = gaussian_blur2d(mask, kernel_size=(5,5), sigma=(1.0,1.0)) + mask = F.interpolate(mask, size=downsize, mode='bilinear', align_corners=False) + else: + mask = F.interpolate(mask, size=downsize, mode='bilinear', align_corners=False) + if round_up: + mask[mask>=eps] = 1 + mask[mask<eps] = 0 + else: + mask[mask>=1.0-eps] = 1 + mask[mask<1.0-eps] = 0 + return mask + +def _erode_mask(mask : torch.Tensor, ekernel : torch.Tensor=None, eps : float=1e-8): + """erode the mask, and set gray pixels to 0""" + if ekernel is not None: + mask = erosion(mask, ekernel) + mask[mask>=1.0-eps] = 1 + mask[mask<1.0-eps] = 0 + return mask + + +def _l1_loss( + pred : torch.Tensor, pred_downscaled : torch.Tensor, ref : torch.Tensor, + mask : torch.Tensor, mask_downscaled : torch.Tensor, + image : torch.Tensor, on_pred : bool=True + ): + """l1 loss on src pixels, and downscaled predictions if on_pred=True""" + loss = torch.mean(torch.abs(pred[mask<1e-8] - image[mask<1e-8])) + if on_pred: + loss += torch.mean(torch.abs(pred_downscaled[mask_downscaled>=1e-8] - ref[mask_downscaled>=1e-8])) + return loss + +def _infer( + image : torch.Tensor, mask : torch.Tensor, + forward_front : nn.Module, forward_rears : nn.Module, + ref_lower_res : torch.Tensor, orig_shape : tuple, devices : list, + scale_ind : int, n_iters : int=15, lr : float=0.002): + """Performs inference with refinement at a given scale. 
+ + Parameters + ---------- + image : torch.Tensor + input image to be inpainted, of size (1,3,H,W) + mask : torch.Tensor + input inpainting mask, of size (1,1,H,W) + forward_front : nn.Module + the front part of the inpainting network + forward_rears : nn.Module + the rear part of the inpainting network + ref_lower_res : torch.Tensor + the inpainting at previous scale, used as reference image + orig_shape : tuple + shape of the original input image before padding + devices : list + list of available devices + scale_ind : int + the scale index + n_iters : int, optional + number of iterations of refinement, by default 15 + lr : float, optional + learning rate, by default 0.002 + + Returns + ------- + torch.Tensor + inpainted image + """ + masked_image = image * (1 - mask) + masked_image = torch.cat([masked_image, mask], dim=1) + + mask = mask.repeat(1,3,1,1) + if ref_lower_res is not None: + ref_lower_res = ref_lower_res.detach() + with torch.no_grad(): + z1,z2 = forward_front(masked_image) + # Inference + mask = mask.to(devices[-1]) + ekernel = torch.from_numpy(cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(15,15)).astype(bool)).float() + ekernel = ekernel.to(devices[-1]) + image = image.to(devices[-1]) + z1, z2 = z1.detach().to(devices[0]), z2.detach().to(devices[0]) + z1.requires_grad, z2.requires_grad = True, True + + optimizer = Adam([z1,z2], lr=lr) + + pbar = tqdm(range(n_iters), leave=False) + for idi in pbar: + optimizer.zero_grad() + input_feat = (z1,z2) + for idd, forward_rear in enumerate(forward_rears): + output_feat = forward_rear(input_feat) + if idd < len(devices) - 1: + midz1, midz2 = output_feat + midz1, midz2 = midz1.to(devices[idd+1]), midz2.to(devices[idd+1]) + input_feat = (midz1, midz2) + else: + pred = output_feat + + if ref_lower_res is None: + break + losses = {} + ######################### multi-scale ############################# + # scaled loss with downsampler + pred_downscaled = _pyrdown(pred[:,:,:orig_shape[0],:orig_shape[1]]) + mask_downscaled = _pyrdown_mask(mask[:,:1,:orig_shape[0],:orig_shape[1]], blur_mask=False, round_up=False) + mask_downscaled = _erode_mask(mask_downscaled, ekernel=ekernel) + mask_downscaled = mask_downscaled.repeat(1,3,1,1) + losses["ms_l1"] = _l1_loss(pred, pred_downscaled, ref_lower_res, mask, mask_downscaled, image, on_pred=True) + + loss = sum(losses.values()) + pbar.set_description("Refining scale {} using scale {} ...current loss: {:.4f}".format(scale_ind+1, scale_ind, loss.item())) + if idi < n_iters - 1: + loss.backward() + optimizer.step() + del pred_downscaled + del loss + del pred + # "pred" is the prediction after Plug-n-Play module + inpainted = mask * pred + (1 - mask) * image + inpainted = inpainted.detach().cpu() + return inpainted + +def _get_image_mask_pyramid(batch : dict, min_side : int, max_scales : int, px_budget : int): + """Build the image mask pyramid + + Parameters + ---------- + batch : dict + batch containing image, mask, etc + min_side : int + minimum side length to limit the number of scales of the pyramid + max_scales : int + maximum number of scales allowed + px_budget : int + the product H*W cannot exceed this budget, because of resource constraints + + Returns + ------- + tuple + image-mask pyramid in the form of list of images and list of masks + """ + + assert batch['image'].shape[0] == 1, "refiner works on only batches of size 1!" 
+ + h, w = batch['unpad_to_size'] + h, w = h[0].item(), w[0].item() + + image = batch['image'][...,:h,:w] + mask = batch['mask'][...,:h,:w] + if h*w > px_budget: + #resize + ratio = np.sqrt(px_budget / float(h*w)) + h_orig, w_orig = h, w + h,w = int(h*ratio), int(w*ratio) + print(f"Original image too large for refinement! Resizing {(h_orig,w_orig)} to {(h,w)}...") + image = resize(image, (h,w),interpolation='bilinear', align_corners=False) + mask = resize(mask, (h,w),interpolation='bilinear', align_corners=False) + mask[mask>1e-8] = 1 + breadth = min(h,w) + n_scales = min(1 + int(round(max(0,np.log2(breadth / min_side)))), max_scales) + ls_images = [] + ls_masks = [] + + ls_images.append(image) + ls_masks.append(mask) + + for _ in range(n_scales - 1): + image_p = _pyrdown(ls_images[-1]) + mask_p = _pyrdown_mask(ls_masks[-1]) + ls_images.append(image_p) + ls_masks.append(mask_p) + # reverse the lists because we want the lowest resolution image as index 0 + return ls_images[::-1], ls_masks[::-1] + +def refine_predict( + batch : dict, inpainter : nn.Module, gpu_ids : str, + modulo : int, n_iters : int, lr : float, min_side : int, + max_scales : int, px_budget : int + ): + """Refines the inpainting of the network + + Parameters + ---------- + batch : dict + image-mask batch, currently we assume the batchsize to be 1 + inpainter : nn.Module + the inpainting neural network + gpu_ids : str + the GPU ids of the machine to use. If only single GPU, use: "0," + modulo : int + pad the image to ensure dimension % modulo == 0 + n_iters : int + number of iterations of refinement for each scale + lr : float + learning rate + min_side : int + all sides of image on all scales should be >= min_side / sqrt(2) + max_scales : int + max number of downscaling scales for the image-mask pyramid + px_budget : int + pixels budget. 
Any image will be resized to satisfy height*width <= px_budget + + Returns + ------- + torch.Tensor + inpainted image of size (1,3,H,W) + """ + + assert not inpainter.training + assert not inpainter.add_noise_kwargs + assert inpainter.concat_mask + + gpu_ids = [f'cuda:{gpuid}' for gpuid in gpu_ids.replace(" ","").split(",") if gpuid.isdigit()] + n_resnet_blocks = 0 + first_resblock_ind = 0 + found_first_resblock = False + for idl in range(len(inpainter.generator.model)): + if isinstance(inpainter.generator.model[idl], FFCResnetBlock) or isinstance(inpainter.generator.model[idl], ResnetBlock): + n_resnet_blocks += 1 + found_first_resblock = True + elif not found_first_resblock: + first_resblock_ind += 1 + resblocks_per_gpu = n_resnet_blocks // len(gpu_ids) + + devices = [torch.device(gpu_id) for gpu_id in gpu_ids] + + # split the model into front, and rear parts + forward_front = inpainter.generator.model[0:first_resblock_ind] + forward_front.to(devices[0]) + forward_rears = [] + for idd in range(len(gpu_ids)): + if idd < len(gpu_ids) - 1: + forward_rears.append(inpainter.generator.model[first_resblock_ind + resblocks_per_gpu*(idd):first_resblock_ind+resblocks_per_gpu*(idd+1)]) + else: + forward_rears.append(inpainter.generator.model[first_resblock_ind + resblocks_per_gpu*(idd):]) + forward_rears[idd].to(devices[idd]) + + ls_images, ls_masks = _get_image_mask_pyramid( + batch, + min_side, + max_scales, + px_budget + ) + image_inpainted = None + + for ids, (image, mask) in enumerate(zip(ls_images, ls_masks)): + orig_shape = image.shape[2:] + image = pad_tensor_to_modulo(image, modulo) + mask = pad_tensor_to_modulo(mask, modulo) + mask[mask >= 1e-8] = 1.0 + mask[mask < 1e-8] = 0.0 + image, mask = move_to_device(image, devices[0]), move_to_device(mask, devices[0]) + if image_inpainted is not None: + image_inpainted = move_to_device(image_inpainted, devices[-1]) + image_inpainted = _infer(image, mask, forward_front, forward_rears, image_inpainted, orig_shape, devices, ids, n_iters, lr) + image_inpainted = image_inpainted[:,:,:orig_shape[0], :orig_shape[1]] + # detach everything to save resources + image = image.detach().cpu() + mask = mask.detach().cpu() + + return image_inpainted diff --git a/lama/saicinpainting/evaluation/utils.py b/lama/saicinpainting/evaluation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d7c15c9242ed8a9bc59fbb3b450cca394720bb8 --- /dev/null +++ b/lama/saicinpainting/evaluation/utils.py @@ -0,0 +1,28 @@ +from enum import Enum + +import yaml +from easydict import EasyDict as edict +import torch.nn as nn +import torch + + +def load_yaml(path): + with open(path, 'r') as f: + return edict(yaml.safe_load(f)) + + +def move_to_device(obj, device): + if isinstance(obj, nn.Module): + return obj.to(device) + if torch.is_tensor(obj): + return obj.to(device) + if isinstance(obj, (tuple, list)): + return [move_to_device(el, device) for el in obj] + if isinstance(obj, dict): + return {name: move_to_device(val, device) for name, val in obj.items()} + raise ValueError(f'Unexpected type {type(obj)}') + + +class SmallMode(Enum): + DROP = "drop" + UPSCALE = "upscale" diff --git a/lama/saicinpainting/evaluation/vis.py b/lama/saicinpainting/evaluation/vis.py new file mode 100644 index 0000000000000000000000000000000000000000..c2910b4ef8c61efee72dabd0531a9b669ec8bf98 --- /dev/null +++ b/lama/saicinpainting/evaluation/vis.py @@ -0,0 +1,37 @@ +import numpy as np +from skimage import io +from skimage.segmentation import mark_boundaries + + +def 
save_item_for_vis(item, out_file): + mask = item['mask'] > 0.5 + if mask.ndim == 3: + mask = mask[0] + img = mark_boundaries(np.transpose(item['image'], (1, 2, 0)), + mask, + color=(1., 0., 0.), + outline_color=(1., 1., 1.), + mode='thick') + + if 'inpainted' in item: + inp_img = mark_boundaries(np.transpose(item['inpainted'], (1, 2, 0)), + mask, + color=(1., 0., 0.), + mode='outer') + img = np.concatenate((img, inp_img), axis=1) + + img = np.clip(img * 255, 0, 255).astype('uint8') + io.imsave(out_file, img) + + +def save_mask_for_sidebyside(item, out_file): + mask = item['mask']# > 0.5 + if mask.ndim == 3: + mask = mask[0] + mask = np.clip(mask * 255, 0, 255).astype('uint8') + io.imsave(out_file, mask) + +def save_img_for_sidebyside(item, out_file): + img = np.transpose(item['image'], (1, 2, 0)) + img = np.clip(img * 255, 0, 255).astype('uint8') + io.imsave(out_file, img) \ No newline at end of file diff --git a/lama/saicinpainting/training/__init__.py b/lama/saicinpainting/training/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lama/saicinpainting/training/data/__init__.py b/lama/saicinpainting/training/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lama/saicinpainting/training/data/aug.py b/lama/saicinpainting/training/data/aug.py new file mode 100644 index 0000000000000000000000000000000000000000..b1246250924e79511b58cd3d7ab79de8012f8949 --- /dev/null +++ b/lama/saicinpainting/training/data/aug.py @@ -0,0 +1,84 @@ +from albumentations import DualIAATransform, to_tuple +import imgaug.augmenters as iaa + +class IAAAffine2(DualIAATransform): + """Place a regular grid of points on the input and randomly move the neighbourhood of these point around + via affine transformations. + + Note: This class introduce interpolation artifacts to mask if it has values other than {0;1} + + Args: + p (float): probability of applying the transform. Default: 0.5. + + Targets: + image, mask + """ + + def __init__( + self, + scale=(0.7, 1.3), + translate_percent=None, + translate_px=None, + rotate=0.0, + shear=(-0.1, 0.1), + order=1, + cval=0, + mode="reflect", + always_apply=False, + p=0.5, + ): + super(IAAAffine2, self).__init__(always_apply, p) + self.scale = dict(x=scale, y=scale) + self.translate_percent = to_tuple(translate_percent, 0) + self.translate_px = to_tuple(translate_px, 0) + self.rotate = to_tuple(rotate) + self.shear = dict(x=shear, y=shear) + self.order = order + self.cval = cval + self.mode = mode + + @property + def processor(self): + return iaa.Affine( + self.scale, + self.translate_percent, + self.translate_px, + self.rotate, + self.shear, + self.order, + self.cval, + self.mode, + ) + + def get_transform_init_args_names(self): + return ("scale", "translate_percent", "translate_px", "rotate", "shear", "order", "cval", "mode") + + +class IAAPerspective2(DualIAATransform): + """Perform a random four point perspective transform of the input. + + Note: This class introduce interpolation artifacts to mask if it has values other than {0;1} + + Args: + scale ((float, float): standard deviation of the normal distributions. These are used to sample + the random distances of the subimage's corners from the full image's corners. Default: (0.05, 0.1). + p (float): probability of applying the transform. Default: 0.5. 
+ + Targets: + image, mask + """ + + def __init__(self, scale=(0.05, 0.1), keep_size=True, always_apply=False, p=0.5, + order=1, cval=0, mode="replicate"): + super(IAAPerspective2, self).__init__(always_apply, p) + self.scale = to_tuple(scale, 1.0) + self.keep_size = keep_size + self.cval = cval + self.mode = mode + + @property + def processor(self): + return iaa.PerspectiveTransform(self.scale, keep_size=self.keep_size, mode=self.mode, cval=self.cval) + + def get_transform_init_args_names(self): + return ("scale", "keep_size") diff --git a/lama/saicinpainting/training/data/datasets.py b/lama/saicinpainting/training/data/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..22164f9e900ffddddfcd570c53ec63694b653e8b --- /dev/null +++ b/lama/saicinpainting/training/data/datasets.py @@ -0,0 +1,305 @@ +import glob +import logging +import os +import random + +import albumentations as A +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +import webdataset +from omegaconf import open_dict, OmegaConf +from skimage.feature import canny +from skimage.transform import rescale, resize +from torch.utils.data import Dataset, IterableDataset, DataLoader, DistributedSampler, ConcatDataset + +from ....saicinpainting.evaluation.data import InpaintingDataset as InpaintingEvaluationDataset, \ + OurInpaintingDataset as OurInpaintingEvaluationDataset, ceil_modulo, InpaintingEvalOnlineDataset +from ....saicinpainting.training.data.aug import IAAAffine2, IAAPerspective2 +from ....saicinpainting.training.data.masks import get_mask_generator + +LOGGER = logging.getLogger(__name__) + + +class InpaintingTrainDataset(Dataset): + def __init__(self, indir, mask_generator, transform): + self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True)) + self.mask_generator = mask_generator + self.transform = transform + self.iter_i = 0 + + def __len__(self): + return len(self.in_files) + + def __getitem__(self, item): + path = self.in_files[item] + img = cv2.imread(path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = self.transform(image=img)['image'] + img = np.transpose(img, (2, 0, 1)) + # TODO: maybe generate mask before augmentations? 
slower, but better for segmentation-based masks + mask = self.mask_generator(img, iter_i=self.iter_i) + self.iter_i += 1 + return dict(image=img, + mask=mask) + + +class InpaintingTrainWebDataset(IterableDataset): + def __init__(self, indir, mask_generator, transform, shuffle_buffer=200): + self.impl = webdataset.Dataset(indir).shuffle(shuffle_buffer).decode('rgb').to_tuple('jpg') + self.mask_generator = mask_generator + self.transform = transform + + def __iter__(self): + for iter_i, (img,) in enumerate(self.impl): + img = np.clip(img * 255, 0, 255).astype('uint8') + img = self.transform(image=img)['image'] + img = np.transpose(img, (2, 0, 1)) + mask = self.mask_generator(img, iter_i=iter_i) + yield dict(image=img, + mask=mask) + + +class ImgSegmentationDataset(Dataset): + def __init__(self, indir, mask_generator, transform, out_size, segm_indir, semantic_seg_n_classes): + self.indir = indir + self.segm_indir = segm_indir + self.mask_generator = mask_generator + self.transform = transform + self.out_size = out_size + self.semantic_seg_n_classes = semantic_seg_n_classes + # self.in_files = list(glob.glob(os.path.join(indir, '**', '*.jpg'), recursive=True)) + self.in_files = os.listdir(os.path.join(indir)) + + def __len__(self): + return len(self.in_files) + + def __getitem__(self, item): + path = self.in_files[item] + img = cv2.imread(path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = cv2.resize(img, (self.out_size, self.out_size)) + img = self.transform(image=img)['image'] + img = np.transpose(img, (2, 0, 1)) + mask = self.mask_generator(img) + segm, segm_classes= self.load_semantic_segm(path) + result = dict(image=img, + mask=mask, + segm=segm, + segm_classes=segm_classes) + return result + + def load_semantic_segm(self, img_path): + segm_path = img_path.replace(self.indir, self.segm_indir).replace(".jpg", ".png") + mask = cv2.imread(segm_path, cv2.IMREAD_GRAYSCALE) + mask = cv2.resize(mask, (self.out_size, self.out_size)) + tensor = torch.from_numpy(np.clip(mask.astype(int)-1, 0, None)) + ohe = F.one_hot(tensor.long(), num_classes=self.semantic_seg_n_classes) # w x h x n_classes + return ohe.permute(2, 0, 1).float(), tensor.unsqueeze(0) + + +def get_transforms(transform_variant, out_size): + if transform_variant == 'default': + transform = A.Compose([ + A.RandomScale(scale_limit=0.2), # +/- 20% + A.PadIfNeeded(min_height=out_size, min_width=out_size), + A.RandomCrop(height=out_size, width=out_size), + A.HorizontalFlip(), + A.CLAHE(), + A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), + A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), + A.ToFloat() + ]) + elif transform_variant == 'distortions': + transform = A.Compose([ + IAAPerspective2(scale=(0.0, 0.06)), + IAAAffine2(scale=(0.7, 1.3), + rotate=(-40, 40), + shear=(-0.1, 0.1)), + A.PadIfNeeded(min_height=out_size, min_width=out_size), + A.OpticalDistortion(), + A.RandomCrop(height=out_size, width=out_size), + A.HorizontalFlip(), + A.CLAHE(), + A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), + A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), + A.ToFloat() + ]) + elif transform_variant == 'distortions_scale05_1': + transform = A.Compose([ + IAAPerspective2(scale=(0.0, 0.06)), + IAAAffine2(scale=(0.5, 1.0), + rotate=(-40, 40), + shear=(-0.1, 0.1), + p=1), + A.PadIfNeeded(min_height=out_size, min_width=out_size), + A.OpticalDistortion(), + A.RandomCrop(height=out_size, width=out_size), + A.HorizontalFlip(), + A.CLAHE(), + 
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), + A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), + A.ToFloat() + ]) + elif transform_variant == 'distortions_scale03_12': + transform = A.Compose([ + IAAPerspective2(scale=(0.0, 0.06)), + IAAAffine2(scale=(0.3, 1.2), + rotate=(-40, 40), + shear=(-0.1, 0.1), + p=1), + A.PadIfNeeded(min_height=out_size, min_width=out_size), + A.OpticalDistortion(), + A.RandomCrop(height=out_size, width=out_size), + A.HorizontalFlip(), + A.CLAHE(), + A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), + A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), + A.ToFloat() + ]) + elif transform_variant == 'distortions_scale03_07': + transform = A.Compose([ + IAAPerspective2(scale=(0.0, 0.06)), + IAAAffine2(scale=(0.3, 0.7), # scale 512 to 256 in average + rotate=(-40, 40), + shear=(-0.1, 0.1), + p=1), + A.PadIfNeeded(min_height=out_size, min_width=out_size), + A.OpticalDistortion(), + A.RandomCrop(height=out_size, width=out_size), + A.HorizontalFlip(), + A.CLAHE(), + A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), + A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), + A.ToFloat() + ]) + elif transform_variant == 'distortions_light': + transform = A.Compose([ + IAAPerspective2(scale=(0.0, 0.02)), + IAAAffine2(scale=(0.8, 1.8), + rotate=(-20, 20), + shear=(-0.03, 0.03)), + A.PadIfNeeded(min_height=out_size, min_width=out_size), + A.RandomCrop(height=out_size, width=out_size), + A.HorizontalFlip(), + A.CLAHE(), + A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), + A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), + A.ToFloat() + ]) + elif transform_variant == 'non_space_transform': + transform = A.Compose([ + A.CLAHE(), + A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), + A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), + A.ToFloat() + ]) + elif transform_variant == 'no_augs': + transform = A.Compose([ + A.ToFloat() + ]) + else: + raise ValueError(f'Unexpected transform_variant {transform_variant}') + return transform + + +def make_default_train_dataloader(indir, kind='default', out_size=512, mask_gen_kwargs=None, transform_variant='default', + mask_generator_kind="mixed", dataloader_kwargs=None, ddp_kwargs=None, **kwargs): + LOGGER.info(f'Make train dataloader {kind} from {indir}. 
Using mask generator={mask_generator_kind}') + + mask_generator = get_mask_generator(kind=mask_generator_kind, kwargs=mask_gen_kwargs) + transform = get_transforms(transform_variant, out_size) + + if kind == 'default': + dataset = InpaintingTrainDataset(indir=indir, + mask_generator=mask_generator, + transform=transform, + **kwargs) + elif kind == 'default_web': + dataset = InpaintingTrainWebDataset(indir=indir, + mask_generator=mask_generator, + transform=transform, + **kwargs) + elif kind == 'img_with_segm': + dataset = ImgSegmentationDataset(indir=indir, + mask_generator=mask_generator, + transform=transform, + out_size=out_size, + **kwargs) + else: + raise ValueError(f'Unknown train dataset kind {kind}') + + if dataloader_kwargs is None: + dataloader_kwargs = {} + + is_dataset_only_iterable = kind in ('default_web',) + + if ddp_kwargs is not None and not is_dataset_only_iterable: + dataloader_kwargs['shuffle'] = False + dataloader_kwargs['sampler'] = DistributedSampler(dataset, **ddp_kwargs) + + if is_dataset_only_iterable and 'shuffle' in dataloader_kwargs: + with open_dict(dataloader_kwargs): + del dataloader_kwargs['shuffle'] + + dataloader = DataLoader(dataset, **dataloader_kwargs) + return dataloader + + +def make_default_val_dataset(indir, kind='default', out_size=512, transform_variant='default', **kwargs): + if OmegaConf.is_list(indir) or isinstance(indir, (tuple, list)): + return ConcatDataset([ + make_default_val_dataset(idir, kind=kind, out_size=out_size, transform_variant=transform_variant, **kwargs) for idir in indir + ]) + + LOGGER.info(f'Make val dataloader {kind} from {indir}') + mask_generator = get_mask_generator(kind=kwargs.get("mask_generator_kind"), kwargs=kwargs.get("mask_gen_kwargs")) + + if transform_variant is not None: + transform = get_transforms(transform_variant, out_size) + + if kind == 'default': + dataset = InpaintingEvaluationDataset(indir, **kwargs) + elif kind == 'our_eval': + dataset = OurInpaintingEvaluationDataset(indir, **kwargs) + elif kind == 'img_with_segm': + dataset = ImgSegmentationDataset(indir=indir, + mask_generator=mask_generator, + transform=transform, + out_size=out_size, + **kwargs) + elif kind == 'online': + dataset = InpaintingEvalOnlineDataset(indir=indir, + mask_generator=mask_generator, + transform=transform, + out_size=out_size, + **kwargs) + else: + raise ValueError(f'Unknown val dataset kind {kind}') + + return dataset + + +def make_default_val_dataloader(*args, dataloader_kwargs=None, **kwargs): + dataset = make_default_val_dataset(*args, **kwargs) + + if dataloader_kwargs is None: + dataloader_kwargs = {} + dataloader = DataLoader(dataset, **dataloader_kwargs) + return dataloader + + +def make_constant_area_crop_params(img_height, img_width, min_size=128, max_size=512, area=256*256, round_to_mod=16): + min_size = min(img_height, img_width, min_size) + max_size = min(img_height, img_width, max_size) + if random.random() < 0.5: + out_height = min(max_size, ceil_modulo(random.randint(min_size, max_size), round_to_mod)) + out_width = min(max_size, ceil_modulo(area // out_height, round_to_mod)) + else: + out_width = min(max_size, ceil_modulo(random.randint(min_size, max_size), round_to_mod)) + out_height = min(max_size, ceil_modulo(area // out_width, round_to_mod)) + + start_y = random.randint(0, img_height - out_height) + start_x = random.randint(0, img_width - out_width) + return (start_y, start_x, out_height, out_width) diff --git a/lama/saicinpainting/training/data/masks.py b/lama/saicinpainting/training/data/masks.py new 
file mode 100644 index 0000000000000000000000000000000000000000..e1f58dee6505bf5eb2fc7da14e18c1195d0df3a6 --- /dev/null +++ b/lama/saicinpainting/training/data/masks.py @@ -0,0 +1,332 @@ +import math +import random +import hashlib +import logging +from enum import Enum + +import cv2 +import numpy as np + +from ....saicinpainting.evaluation.masks.mask import SegmentationMask +from ....saicinpainting.utils import LinearRamp + +LOGGER = logging.getLogger(__name__) + + +class DrawMethod(Enum): + LINE = 'line' + CIRCLE = 'circle' + SQUARE = 'square' + + +def make_random_irregular_mask(shape, max_angle=4, max_len=60, max_width=20, min_times=0, max_times=10, + draw_method=DrawMethod.LINE): + draw_method = DrawMethod(draw_method) + + height, width = shape + mask = np.zeros((height, width), np.float32) + times = np.random.randint(min_times, max_times + 1) + for i in range(times): + start_x = np.random.randint(width) + start_y = np.random.randint(height) + for j in range(1 + np.random.randint(5)): + angle = 0.01 + np.random.randint(max_angle) + if i % 2 == 0: + angle = 2 * 3.1415926 - angle + length = 10 + np.random.randint(max_len) + brush_w = 5 + np.random.randint(max_width) + end_x = np.clip((start_x + length * np.sin(angle)).astype(np.int32), 0, width) + end_y = np.clip((start_y + length * np.cos(angle)).astype(np.int32), 0, height) + if draw_method == DrawMethod.LINE: + cv2.line(mask, (start_x, start_y), (end_x, end_y), 1.0, brush_w) + elif draw_method == DrawMethod.CIRCLE: + cv2.circle(mask, (start_x, start_y), radius=brush_w, color=1., thickness=-1) + elif draw_method == DrawMethod.SQUARE: + radius = brush_w // 2 + mask[start_y - radius:start_y + radius, start_x - radius:start_x + radius] = 1 + start_x, start_y = end_x, end_y + return mask[None, ...] + + +class RandomIrregularMaskGenerator: + def __init__(self, max_angle=4, max_len=60, max_width=20, min_times=0, max_times=10, ramp_kwargs=None, + draw_method=DrawMethod.LINE): + self.max_angle = max_angle + self.max_len = max_len + self.max_width = max_width + self.min_times = min_times + self.max_times = max_times + self.draw_method = draw_method + self.ramp = LinearRamp(**ramp_kwargs) if ramp_kwargs is not None else None + + def __call__(self, img, iter_i=None, raw_image=None): + coef = self.ramp(iter_i) if (self.ramp is not None) and (iter_i is not None) else 1 + cur_max_len = int(max(1, self.max_len * coef)) + cur_max_width = int(max(1, self.max_width * coef)) + cur_max_times = int(self.min_times + 1 + (self.max_times - self.min_times) * coef) + return make_random_irregular_mask(img.shape[1:], max_angle=self.max_angle, max_len=cur_max_len, + max_width=cur_max_width, min_times=self.min_times, max_times=cur_max_times, + draw_method=self.draw_method) + + +def make_random_rectangle_mask(shape, margin=10, bbox_min_size=30, bbox_max_size=100, min_times=0, max_times=3): + height, width = shape + mask = np.zeros((height, width), np.float32) + bbox_max_size = min(bbox_max_size, height - margin * 2, width - margin * 2) + times = np.random.randint(min_times, max_times + 1) + for i in range(times): + box_width = np.random.randint(bbox_min_size, bbox_max_size) + box_height = np.random.randint(bbox_min_size, bbox_max_size) + start_x = np.random.randint(margin, width - margin - box_width + 1) + start_y = np.random.randint(margin, height - margin - box_height + 1) + mask[start_y:start_y + box_height, start_x:start_x + box_width] = 1 + return mask[None, ...] 
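For reference, the free-form and rectangle masks follow the same calling convention: the make_* helpers take the spatial shape (height, width) and return a float32 mask of shape (1, H, W), while the generator classes accept a CHW image plus an optional iteration counter that drives the LinearRamp schedule. A minimal usage sketch, assuming a synthetic 3x256x256 zero image that is not part of this patch:

import numpy as np

img = np.zeros((3, 256, 256), dtype=np.float32)            # CHW image, as the generators expect
rect_mask = make_random_rectangle_mask(img.shape[1:])      # up to max_times boxes drawn inside the margins
irregular_gen = RandomIrregularMaskGenerator(max_len=60, max_width=20)
irr_mask = irregular_gen(img, iter_i=0)                    # no ramp_kwargs, so the ramp coefficient stays at 1
assert rect_mask.shape == irr_mask.shape == (1, 256, 256)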
+ + +class RandomRectangleMaskGenerator: + def __init__(self, margin=10, bbox_min_size=30, bbox_max_size=100, min_times=0, max_times=3, ramp_kwargs=None): + self.margin = margin + self.bbox_min_size = bbox_min_size + self.bbox_max_size = bbox_max_size + self.min_times = min_times + self.max_times = max_times + self.ramp = LinearRamp(**ramp_kwargs) if ramp_kwargs is not None else None + + def __call__(self, img, iter_i=None, raw_image=None): + coef = self.ramp(iter_i) if (self.ramp is not None) and (iter_i is not None) else 1 + cur_bbox_max_size = int(self.bbox_min_size + 1 + (self.bbox_max_size - self.bbox_min_size) * coef) + cur_max_times = int(self.min_times + (self.max_times - self.min_times) * coef) + return make_random_rectangle_mask(img.shape[1:], margin=self.margin, bbox_min_size=self.bbox_min_size, + bbox_max_size=cur_bbox_max_size, min_times=self.min_times, + max_times=cur_max_times) + + +class RandomSegmentationMaskGenerator: + def __init__(self, **kwargs): + self.impl = None # will be instantiated in first call (effectively in subprocess) + self.kwargs = kwargs + + def __call__(self, img, iter_i=None, raw_image=None): + if self.impl is None: + self.impl = SegmentationMask(**self.kwargs) + + masks = self.impl.get_masks(np.transpose(img, (1, 2, 0))) + masks = [m for m in masks if len(np.unique(m)) > 1] + return np.random.choice(masks) + + +def make_random_superres_mask(shape, min_step=2, max_step=4, min_width=1, max_width=3): + height, width = shape + mask = np.zeros((height, width), np.float32) + step_x = np.random.randint(min_step, max_step + 1) + width_x = np.random.randint(min_width, min(step_x, max_width + 1)) + offset_x = np.random.randint(0, step_x) + + step_y = np.random.randint(min_step, max_step + 1) + width_y = np.random.randint(min_width, min(step_y, max_width + 1)) + offset_y = np.random.randint(0, step_y) + + for dy in range(width_y): + mask[offset_y + dy::step_y] = 1 + for dx in range(width_x): + mask[:, offset_x + dx::step_x] = 1 + return mask[None, ...] + + +class RandomSuperresMaskGenerator: + def __init__(self, **kwargs): + self.kwargs = kwargs + + def __call__(self, img, iter_i=None): + return make_random_superres_mask(img.shape[1:], **self.kwargs) + + +class DumbAreaMaskGenerator: + min_ratio = 0.1 + max_ratio = 0.35 + default_ratio = 0.225 + + def __init__(self, is_training): + #Parameters: + # is_training(bool): If true - random rectangular mask, if false - central square mask + self.is_training = is_training + + def _random_vector(self, dimension): + if self.is_training: + lower_limit = math.sqrt(self.min_ratio) + upper_limit = math.sqrt(self.max_ratio) + mask_side = round((random.random() * (upper_limit - lower_limit) + lower_limit) * dimension) + u = random.randint(0, dimension-mask_side-1) + v = u+mask_side + else: + margin = (math.sqrt(self.default_ratio) / 2) * dimension + u = round(dimension/2 - margin) + v = round(dimension/2 + margin) + return u, v + + def __call__(self, img, iter_i=None, raw_image=None): + c, height, width = img.shape + mask = np.zeros((height, width), np.float32) + x1, x2 = self._random_vector(width) + y1, y2 = self._random_vector(height) + mask[x1:x2, y1:y2] = 1 + return mask[None, ...] 
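The superres and dumb-area generators share that convention. In a short sketch (the zero image is again only a placeholder), DumbAreaMaskGenerator with is_training=False masks a central square covering roughly default_ratio (about 22.5%) of the image, while RandomSuperresMaskGenerator lays down a regular grid of horizontal and vertical stripes with randomly sampled step, width and offset:

import numpy as np

img = np.zeros((3, 256, 256), dtype=np.float32)
center_mask = DumbAreaMaskGenerator(is_training=False)(img)   # central square, area fraction ~0.22
stripe_mask = RandomSuperresMaskGenerator()(img)              # striped grid mask
print(center_mask.shape, round(float(center_mask.mean()), 3)) # (1, 256, 256) ~0.227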
+ + +class OutpaintingMaskGenerator: + def __init__(self, min_padding_percent:float=0.04, max_padding_percent:int=0.25, left_padding_prob:float=0.5, top_padding_prob:float=0.5, + right_padding_prob:float=0.5, bottom_padding_prob:float=0.5, is_fixed_randomness:bool=False): + """ + is_fixed_randomness - get identical paddings for the same image if args are the same + """ + self.min_padding_percent = min_padding_percent + self.max_padding_percent = max_padding_percent + self.probs = [left_padding_prob, top_padding_prob, right_padding_prob, bottom_padding_prob] + self.is_fixed_randomness = is_fixed_randomness + + assert self.min_padding_percent <= self.max_padding_percent + assert self.max_padding_percent > 0 + assert len([x for x in [self.min_padding_percent, self.max_padding_percent] if (x>=0 and x<=1)]) == 2, f"Padding percentage should be in [0,1]" + assert sum(self.probs) > 0, f"At least one of the padding probs should be greater than 0 - {self.probs}" + assert len([x for x in self.probs if (x >= 0) and (x <= 1)]) == 4, f"At least one of padding probs is not in [0,1] - {self.probs}" + if len([x for x in self.probs if x > 0]) == 1: + LOGGER.warning(f"Only one padding prob is greater than zero - {self.probs}. That means that the outpainting masks will be always on the same side") + + def apply_padding(self, mask, coord): + mask[int(coord[0][0]*self.img_h):int(coord[1][0]*self.img_h), + int(coord[0][1]*self.img_w):int(coord[1][1]*self.img_w)] = 1 + return mask + + def get_padding(self, size): + n1 = int(self.min_padding_percent*size) + n2 = int(self.max_padding_percent*size) + return self.rnd.randint(n1, n2) / size + + @staticmethod + def _img2rs(img): + arr = np.ascontiguousarray(img.astype(np.uint8)) + str_hash = hashlib.sha1(arr).hexdigest() + res = hash(str_hash)%(2**32) + return res + + def __call__(self, img, iter_i=None, raw_image=None): + c, self.img_h, self.img_w = img.shape + mask = np.zeros((self.img_h, self.img_w), np.float32) + at_least_one_mask_applied = False + + if self.is_fixed_randomness: + assert raw_image is not None, f"Cant calculate hash on raw_image=None" + rs = self._img2rs(raw_image) + self.rnd = np.random.RandomState(rs) + else: + self.rnd = np.random + + coords = [[ + (0,0), + (1,self.get_padding(size=self.img_h)) + ], + [ + (0,0), + (self.get_padding(size=self.img_w),1) + ], + [ + (0,1-self.get_padding(size=self.img_h)), + (1,1) + ], + [ + (1-self.get_padding(size=self.img_w),0), + (1,1) + ]] + + for pp, coord in zip(self.probs, coords): + if self.rnd.random() < pp: + at_least_one_mask_applied = True + mask = self.apply_padding(mask=mask, coord=coord) + + if not at_least_one_mask_applied: + idx = self.rnd.choice(range(len(coords)), p=np.array(self.probs)/sum(self.probs)) + mask = self.apply_padding(mask=mask, coord=coords[idx]) + return mask[None, ...] 
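OutpaintingMaskGenerator instead masks whole strips along the image borders: each side is masked with its configured probability, strip widths are sampled between min_padding_percent and max_padding_percent of the corresponding dimension, and if every per-side draw fails one side is still forced on, so the mask is never empty. A hedged sketch with an assumed toy input:

import numpy as np

img = np.zeros((3, 256, 256), dtype=np.float32)
outpaint_gen = OutpaintingMaskGenerator(min_padding_percent=0.04, max_padding_percent=0.25)
border_mask = outpaint_gen(img)
assert border_mask.shape == (1, 256, 256) and border_mask.sum() > 0   # at least one border strip is masked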
+ + +class MixedMaskGenerator: + def __init__(self, irregular_proba=1/3, irregular_kwargs=None, + box_proba=1/3, box_kwargs=None, + segm_proba=1/3, segm_kwargs=None, + squares_proba=0, squares_kwargs=None, + superres_proba=0, superres_kwargs=None, + outpainting_proba=0, outpainting_kwargs=None, + invert_proba=0): + self.probas = [] + self.gens = [] + + if irregular_proba > 0: + self.probas.append(irregular_proba) + if irregular_kwargs is None: + irregular_kwargs = {} + else: + irregular_kwargs = dict(irregular_kwargs) + irregular_kwargs['draw_method'] = DrawMethod.LINE + self.gens.append(RandomIrregularMaskGenerator(**irregular_kwargs)) + + if box_proba > 0: + self.probas.append(box_proba) + if box_kwargs is None: + box_kwargs = {} + self.gens.append(RandomRectangleMaskGenerator(**box_kwargs)) + + if segm_proba > 0: + self.probas.append(segm_proba) + if segm_kwargs is None: + segm_kwargs = {} + self.gens.append(RandomSegmentationMaskGenerator(**segm_kwargs)) + + if squares_proba > 0: + self.probas.append(squares_proba) + if squares_kwargs is None: + squares_kwargs = {} + else: + squares_kwargs = dict(squares_kwargs) + squares_kwargs['draw_method'] = DrawMethod.SQUARE + self.gens.append(RandomIrregularMaskGenerator(**squares_kwargs)) + + if superres_proba > 0: + self.probas.append(superres_proba) + if superres_kwargs is None: + superres_kwargs = {} + self.gens.append(RandomSuperresMaskGenerator(**superres_kwargs)) + + if outpainting_proba > 0: + self.probas.append(outpainting_proba) + if outpainting_kwargs is None: + outpainting_kwargs = {} + self.gens.append(OutpaintingMaskGenerator(**outpainting_kwargs)) + + self.probas = np.array(self.probas, dtype='float32') + self.probas /= self.probas.sum() + self.invert_proba = invert_proba + + def __call__(self, img, iter_i=None, raw_image=None): + kind = np.random.choice(len(self.probas), p=self.probas) + gen = self.gens[kind] + result = gen(img, iter_i=iter_i, raw_image=raw_image) + if self.invert_proba > 0 and random.random() < self.invert_proba: + result = 1 - result + return result + + +def get_mask_generator(kind, kwargs): + if kind is None: + kind = "mixed" + if kwargs is None: + kwargs = {} + + if kind == "mixed": + cl = MixedMaskGenerator + elif kind == "outpainting": + cl = OutpaintingMaskGenerator + elif kind == "dumb": + cl = DumbAreaMaskGenerator + else: + raise NotImplementedError(f"No such generator kind = {kind}") + return cl(**kwargs) diff --git a/lama/saicinpainting/training/losses/__init__.py b/lama/saicinpainting/training/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lama/saicinpainting/training/losses/adversarial.py b/lama/saicinpainting/training/losses/adversarial.py new file mode 100644 index 0000000000000000000000000000000000000000..d6db2967ce5074d94ed3b4c51fc743ff2f7831b1 --- /dev/null +++ b/lama/saicinpainting/training/losses/adversarial.py @@ -0,0 +1,177 @@ +from typing import Tuple, Dict, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class BaseAdversarialLoss: + def pre_generator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, + generator: nn.Module, discriminator: nn.Module): + """ + Prepare for generator step + :param real_batch: Tensor, a batch of real samples + :param fake_batch: Tensor, a batch of samples produced by generator + :param generator: + :param discriminator: + :return: None + """ + + def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: 
torch.Tensor, + generator: nn.Module, discriminator: nn.Module): + """ + Prepare for discriminator step + :param real_batch: Tensor, a batch of real samples + :param fake_batch: Tensor, a batch of samples produced by generator + :param generator: + :param discriminator: + :return: None + """ + + def generator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, + discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor, + mask: Optional[torch.Tensor] = None) \ + -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: + """ + Calculate generator loss + :param real_batch: Tensor, a batch of real samples + :param fake_batch: Tensor, a batch of samples produced by generator + :param discr_real_pred: Tensor, discriminator output for real_batch + :param discr_fake_pred: Tensor, discriminator output for fake_batch + :param mask: Tensor, actual mask, which was at input of generator when making fake_batch + :return: total generator loss along with some values that might be interesting to log + """ + raise NotImplemented() + + def discriminator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, + discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor, + mask: Optional[torch.Tensor] = None) \ + -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: + """ + Calculate discriminator loss and call .backward() on it + :param real_batch: Tensor, a batch of real samples + :param fake_batch: Tensor, a batch of samples produced by generator + :param discr_real_pred: Tensor, discriminator output for real_batch + :param discr_fake_pred: Tensor, discriminator output for fake_batch + :param mask: Tensor, actual mask, which was at input of generator when making fake_batch + :return: total discriminator loss along with some values that might be interesting to log + """ + raise NotImplemented() + + def interpolate_mask(self, mask, shape): + assert mask is not None + assert self.allow_scale_mask or shape == mask.shape[-2:] + if shape != mask.shape[-2:] and self.allow_scale_mask: + if self.mask_scale_mode == 'maxpool': + mask = F.adaptive_max_pool2d(mask, shape) + else: + mask = F.interpolate(mask, size=shape, mode=self.mask_scale_mode) + return mask + +def make_r1_gp(discr_real_pred, real_batch): + if torch.is_grad_enabled(): + grad_real = torch.autograd.grad(outputs=discr_real_pred.sum(), inputs=real_batch, create_graph=True)[0] + grad_penalty = (grad_real.view(grad_real.shape[0], -1).norm(2, dim=1) ** 2).mean() + else: + grad_penalty = 0 + real_batch.requires_grad = False + + return grad_penalty + +class NonSaturatingWithR1(BaseAdversarialLoss): + def __init__(self, gp_coef=5, weight=1, mask_as_fake_target=False, allow_scale_mask=False, + mask_scale_mode='nearest', extra_mask_weight_for_gen=0, + use_unmasked_for_gen=True, use_unmasked_for_discr=True): + self.gp_coef = gp_coef + self.weight = weight + # use for discr => use for gen; + # otherwise we teach only the discr to pay attention to very small difference + assert use_unmasked_for_gen or (not use_unmasked_for_discr) + # mask as target => use unmasked for discr: + # if we don't care about unmasked regions at all + # then it doesn't matter if the value of mask_as_fake_target is true or false + assert use_unmasked_for_discr or (not mask_as_fake_target) + self.use_unmasked_for_gen = use_unmasked_for_gen + self.use_unmasked_for_discr = use_unmasked_for_discr + self.mask_as_fake_target = mask_as_fake_target + self.allow_scale_mask = allow_scale_mask + self.mask_scale_mode = mask_scale_mode + self.extra_mask_weight_for_gen = 
extra_mask_weight_for_gen + + def generator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, + discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor, + mask=None) \ + -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: + fake_loss = F.softplus(-discr_fake_pred) + if (self.mask_as_fake_target and self.extra_mask_weight_for_gen > 0) or \ + not self.use_unmasked_for_gen: # == if masked region should be treated differently + mask = self.interpolate_mask(mask, discr_fake_pred.shape[-2:]) + if not self.use_unmasked_for_gen: + fake_loss = fake_loss * mask + else: + pixel_weights = 1 + mask * self.extra_mask_weight_for_gen + fake_loss = fake_loss * pixel_weights + + return fake_loss.mean() * self.weight, dict() + + def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, + generator: nn.Module, discriminator: nn.Module): + real_batch.requires_grad = True + + def discriminator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, + discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor, + mask=None) \ + -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: + + real_loss = F.softplus(-discr_real_pred) + grad_penalty = make_r1_gp(discr_real_pred, real_batch) * self.gp_coef + fake_loss = F.softplus(discr_fake_pred) + + if not self.use_unmasked_for_discr or self.mask_as_fake_target: + # == if masked region should be treated differently + mask = self.interpolate_mask(mask, discr_fake_pred.shape[-2:]) + # use_unmasked_for_discr=False only makes sense for fakes; + # for reals there is no difference beetween two regions + fake_loss = fake_loss * mask + if self.mask_as_fake_target: + fake_loss = fake_loss + (1 - mask) * F.softplus(-discr_fake_pred) + + sum_discr_loss = real_loss + grad_penalty + fake_loss + metrics = dict(discr_real_out=discr_real_pred.mean(), + discr_fake_out=discr_fake_pred.mean(), + discr_real_gp=grad_penalty) + return sum_discr_loss.mean(), metrics + +class BCELoss(BaseAdversarialLoss): + def __init__(self, weight): + self.weight = weight + self.bce_loss = nn.BCEWithLogitsLoss() + + def generator_loss(self, discr_fake_pred: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: + real_mask_gt = torch.zeros(discr_fake_pred.shape).to(discr_fake_pred.device) + fake_loss = self.bce_loss(discr_fake_pred, real_mask_gt) * self.weight + return fake_loss, dict() + + def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor, + generator: nn.Module, discriminator: nn.Module): + real_batch.requires_grad = True + + def discriminator_loss(self, + mask: torch.Tensor, + discr_real_pred: torch.Tensor, + discr_fake_pred: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: + + real_mask_gt = torch.zeros(discr_real_pred.shape).to(discr_real_pred.device) + sum_discr_loss = (self.bce_loss(discr_real_pred, real_mask_gt) + self.bce_loss(discr_fake_pred, mask)) / 2 + metrics = dict(discr_real_out=discr_real_pred.mean(), + discr_fake_out=discr_fake_pred.mean(), + discr_real_gp=0) + return sum_discr_loss, metrics + + +def make_discrim_loss(kind, **kwargs): + if kind == 'r1': + return NonSaturatingWithR1(**kwargs) + elif kind == 'bce': + return BCELoss(**kwargs) + raise ValueError(f'Unknown adversarial loss kind {kind}') diff --git a/lama/saicinpainting/training/losses/constants.py b/lama/saicinpainting/training/losses/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..ae3e5e151342232be8e2c2a77fe6fd5798dc2a8c --- /dev/null +++ b/lama/saicinpainting/training/losses/constants.py @@ -0,0 
+1,152 @@ +weights = {"ade20k": + [6.34517766497462, + 9.328358208955224, + 11.389521640091116, + 16.10305958132045, + 20.833333333333332, + 22.22222222222222, + 25.125628140703515, + 43.29004329004329, + 50.5050505050505, + 54.6448087431694, + 55.24861878453038, + 60.24096385542168, + 62.5, + 66.2251655629139, + 84.74576271186442, + 90.90909090909092, + 91.74311926605505, + 96.15384615384616, + 96.15384615384616, + 97.08737864077669, + 102.04081632653062, + 135.13513513513513, + 149.2537313432836, + 153.84615384615384, + 163.93442622950818, + 166.66666666666666, + 188.67924528301887, + 192.30769230769232, + 217.3913043478261, + 227.27272727272725, + 227.27272727272725, + 227.27272727272725, + 303.03030303030306, + 322.5806451612903, + 333.3333333333333, + 370.3703703703703, + 384.61538461538464, + 416.6666666666667, + 416.6666666666667, + 434.7826086956522, + 434.7826086956522, + 454.5454545454545, + 454.5454545454545, + 500.0, + 526.3157894736842, + 526.3157894736842, + 555.5555555555555, + 555.5555555555555, + 555.5555555555555, + 555.5555555555555, + 555.5555555555555, + 555.5555555555555, + 555.5555555555555, + 588.2352941176471, + 588.2352941176471, + 588.2352941176471, + 588.2352941176471, + 588.2352941176471, + 666.6666666666666, + 666.6666666666666, + 666.6666666666666, + 666.6666666666666, + 714.2857142857143, + 714.2857142857143, + 714.2857142857143, + 714.2857142857143, + 714.2857142857143, + 769.2307692307693, + 769.2307692307693, + 769.2307692307693, + 833.3333333333334, + 833.3333333333334, + 833.3333333333334, + 833.3333333333334, + 909.090909090909, + 1000.0, + 1111.111111111111, + 1111.111111111111, + 1111.111111111111, + 1111.111111111111, + 1111.111111111111, + 1250.0, + 1250.0, + 1250.0, + 1250.0, + 1250.0, + 1428.5714285714287, + 1428.5714285714287, + 1428.5714285714287, + 1428.5714285714287, + 1428.5714285714287, + 1428.5714285714287, + 1428.5714285714287, + 1666.6666666666667, + 1666.6666666666667, + 1666.6666666666667, + 1666.6666666666667, + 1666.6666666666667, + 1666.6666666666667, + 1666.6666666666667, + 1666.6666666666667, + 1666.6666666666667, + 1666.6666666666667, + 1666.6666666666667, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2000.0, + 2500.0, + 2500.0, + 2500.0, + 2500.0, + 2500.0, + 2500.0, + 2500.0, + 2500.0, + 2500.0, + 2500.0, + 2500.0, + 2500.0, + 2500.0, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 3333.3333333333335, + 5000.0, + 5000.0, + 5000.0] +} \ No newline at end of file diff --git a/lama/saicinpainting/training/losses/distance_weighting.py b/lama/saicinpainting/training/losses/distance_weighting.py new file mode 100644 index 0000000000000000000000000000000000000000..a06e2a5d775c171b5155222c41c9217bb719b613 --- /dev/null +++ b/lama/saicinpainting/training/losses/distance_weighting.py @@ -0,0 +1,126 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision + +from ....saicinpainting.training.losses.perceptual import IMAGENET_STD, IMAGENET_MEAN + + +def dummy_distance_weighter(real_img, pred_img, mask): + return mask + + +def get_gauss_kernel(kernel_size, width_factor=1): + coords = torch.stack(torch.meshgrid(torch.arange(kernel_size), + torch.arange(kernel_size)), + 
dim=0).float() + diff = torch.exp(-((coords - kernel_size // 2) ** 2).sum(0) / kernel_size / width_factor) + diff /= diff.sum() + return diff + + +class BlurMask(nn.Module): + def __init__(self, kernel_size=5, width_factor=1): + super().__init__() + self.filter = nn.Conv2d(1, 1, kernel_size, padding=kernel_size // 2, padding_mode='replicate', bias=False) + self.filter.weight.data.copy_(get_gauss_kernel(kernel_size, width_factor=width_factor)) + + def forward(self, real_img, pred_img, mask): + with torch.no_grad(): + result = self.filter(mask) * mask + return result + + +class EmulatedEDTMask(nn.Module): + def __init__(self, dilate_kernel_size=5, blur_kernel_size=5, width_factor=1): + super().__init__() + self.dilate_filter = nn.Conv2d(1, 1, dilate_kernel_size, padding=dilate_kernel_size// 2, padding_mode='replicate', + bias=False) + self.dilate_filter.weight.data.copy_(torch.ones(1, 1, dilate_kernel_size, dilate_kernel_size, dtype=torch.float)) + self.blur_filter = nn.Conv2d(1, 1, blur_kernel_size, padding=blur_kernel_size // 2, padding_mode='replicate', bias=False) + self.blur_filter.weight.data.copy_(get_gauss_kernel(blur_kernel_size, width_factor=width_factor)) + + def forward(self, real_img, pred_img, mask): + with torch.no_grad(): + known_mask = 1 - mask + dilated_known_mask = (self.dilate_filter(known_mask) > 1).float() + result = self.blur_filter(1 - dilated_known_mask) * mask + return result + + +class PropagatePerceptualSim(nn.Module): + def __init__(self, level=2, max_iters=10, temperature=500, erode_mask_size=3): + super().__init__() + vgg = torchvision.models.vgg19(pretrained=True).features + vgg_avg_pooling = [] + + for weights in vgg.parameters(): + weights.requires_grad = False + + cur_level_i = 0 + for module in vgg.modules(): + if module.__class__.__name__ == 'Sequential': + continue + elif module.__class__.__name__ == 'MaxPool2d': + vgg_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0)) + else: + vgg_avg_pooling.append(module) + if module.__class__.__name__ == 'ReLU': + cur_level_i += 1 + if cur_level_i == level: + break + + self.features = nn.Sequential(*vgg_avg_pooling) + + self.max_iters = max_iters + self.temperature = temperature + self.do_erode = erode_mask_size > 0 + if self.do_erode: + self.erode_mask = nn.Conv2d(1, 1, erode_mask_size, padding=erode_mask_size // 2, bias=False) + self.erode_mask.weight.data.fill_(1) + + def forward(self, real_img, pred_img, mask): + with torch.no_grad(): + real_img = (real_img - IMAGENET_MEAN.to(real_img)) / IMAGENET_STD.to(real_img) + real_feats = self.features(real_img) + + vertical_sim = torch.exp(-(real_feats[:, :, 1:] - real_feats[:, :, :-1]).pow(2).sum(1, keepdim=True) + / self.temperature) + horizontal_sim = torch.exp(-(real_feats[:, :, :, 1:] - real_feats[:, :, :, :-1]).pow(2).sum(1, keepdim=True) + / self.temperature) + + mask_scaled = F.interpolate(mask, size=real_feats.shape[-2:], mode='bilinear', align_corners=False) + if self.do_erode: + mask_scaled = (self.erode_mask(mask_scaled) > 1).float() + + cur_knowness = 1 - mask_scaled + + for iter_i in range(self.max_iters): + new_top_knowness = F.pad(cur_knowness[:, :, :-1] * vertical_sim, (0, 0, 1, 0), mode='replicate') + new_bottom_knowness = F.pad(cur_knowness[:, :, 1:] * vertical_sim, (0, 0, 0, 1), mode='replicate') + + new_left_knowness = F.pad(cur_knowness[:, :, :, :-1] * horizontal_sim, (1, 0, 0, 0), mode='replicate') + new_right_knowness = F.pad(cur_knowness[:, :, :, 1:] * horizontal_sim, (0, 1, 0, 0), mode='replicate') + + new_knowness = 
torch.stack([new_top_knowness, new_bottom_knowness, + new_left_knowness, new_right_knowness], + dim=0).max(0).values + + cur_knowness = torch.max(cur_knowness, new_knowness) + + cur_knowness = F.interpolate(cur_knowness, size=mask.shape[-2:], mode='bilinear') + result = torch.min(mask, 1 - cur_knowness) + + return result + + +def make_mask_distance_weighter(kind='none', **kwargs): + if kind == 'none': + return dummy_distance_weighter + if kind == 'blur': + return BlurMask(**kwargs) + if kind == 'edt': + return EmulatedEDTMask(**kwargs) + if kind == 'pps': + return PropagatePerceptualSim(**kwargs) + raise ValueError(f'Unknown mask distance weighter kind {kind}') diff --git a/lama/saicinpainting/training/losses/feature_matching.py b/lama/saicinpainting/training/losses/feature_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..c019895c9178817837d1a6773367b178a861dc61 --- /dev/null +++ b/lama/saicinpainting/training/losses/feature_matching.py @@ -0,0 +1,33 @@ +from typing import List + +import torch +import torch.nn.functional as F + + +def masked_l2_loss(pred, target, mask, weight_known, weight_missing): + per_pixel_l2 = F.mse_loss(pred, target, reduction='none') + pixel_weights = mask * weight_missing + (1 - mask) * weight_known + return (pixel_weights * per_pixel_l2).mean() + + +def masked_l1_loss(pred, target, mask, weight_known, weight_missing): + per_pixel_l1 = F.l1_loss(pred, target, reduction='none') + pixel_weights = mask * weight_missing + (1 - mask) * weight_known + return (pixel_weights * per_pixel_l1).mean() + + +def feature_matching_loss(fake_features: List[torch.Tensor], target_features: List[torch.Tensor], mask=None): + if mask is None: + res = torch.stack([F.mse_loss(fake_feat, target_feat) + for fake_feat, target_feat in zip(fake_features, target_features)]).mean() + else: + res = 0 + norm = 0 + for fake_feat, target_feat in zip(fake_features, target_features): + cur_mask = F.interpolate(mask, size=fake_feat.shape[-2:], mode='bilinear', align_corners=False) + error_weights = 1 - cur_mask + cur_val = ((fake_feat - target_feat).pow(2) * error_weights).mean() + res = res + cur_val + norm += 1 + res = res / norm + return res diff --git a/lama/saicinpainting/training/losses/perceptual.py b/lama/saicinpainting/training/losses/perceptual.py new file mode 100644 index 0000000000000000000000000000000000000000..c9be8bb7612b1ad349aaf638df52148848969384 --- /dev/null +++ b/lama/saicinpainting/training/losses/perceptual.py @@ -0,0 +1,113 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision + +from ....models.ade20k import ModelBuilder +from ....saicinpainting.utils import check_and_warn_input_range + + +IMAGENET_MEAN = torch.FloatTensor([0.485, 0.456, 0.406])[None, :, None, None] +IMAGENET_STD = torch.FloatTensor([0.229, 0.224, 0.225])[None, :, None, None] + + +class PerceptualLoss(nn.Module): + def __init__(self, normalize_inputs=True): + super(PerceptualLoss, self).__init__() + + self.normalize_inputs = normalize_inputs + self.mean_ = IMAGENET_MEAN + self.std_ = IMAGENET_STD + + vgg = torchvision.models.vgg19(pretrained=True).features + vgg_avg_pooling = [] + + for weights in vgg.parameters(): + weights.requires_grad = False + + for module in vgg.modules(): + if module.__class__.__name__ == 'Sequential': + continue + elif module.__class__.__name__ == 'MaxPool2d': + vgg_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0)) + else: + vgg_avg_pooling.append(module) + + self.vgg = 
nn.Sequential(*vgg_avg_pooling) + + def do_normalize_inputs(self, x): + return (x - self.mean_.to(x.device)) / self.std_.to(x.device) + + def partial_losses(self, input, target, mask=None): + check_and_warn_input_range(target, 0, 1, 'PerceptualLoss target in partial_losses') + + # we expect input and target to be in [0, 1] range + losses = [] + + if self.normalize_inputs: + features_input = self.do_normalize_inputs(input) + features_target = self.do_normalize_inputs(target) + else: + features_input = input + features_target = target + + for layer in self.vgg[:30]: + + features_input = layer(features_input) + features_target = layer(features_target) + + if layer.__class__.__name__ == 'ReLU': + loss = F.mse_loss(features_input, features_target, reduction='none') + + if mask is not None: + cur_mask = F.interpolate(mask, size=features_input.shape[-2:], + mode='bilinear', align_corners=False) + loss = loss * (1 - cur_mask) + + loss = loss.mean(dim=tuple(range(1, len(loss.shape)))) + losses.append(loss) + + return losses + + def forward(self, input, target, mask=None): + losses = self.partial_losses(input, target, mask=mask) + return torch.stack(losses).sum(dim=0) + + def get_global_features(self, input): + check_and_warn_input_range(input, 0, 1, 'PerceptualLoss input in get_global_features') + + if self.normalize_inputs: + features_input = self.do_normalize_inputs(input) + else: + features_input = input + + features_input = self.vgg(features_input) + return features_input + + +class ResNetPL(nn.Module): + def __init__(self, weight=1, + weights_path=None, arch_encoder='resnet50dilated', segmentation=True): + super().__init__() + self.impl = ModelBuilder.get_encoder(weights_path=weights_path, + arch_encoder=arch_encoder, + arch_decoder='ppm_deepsup', + fc_dim=2048, + segmentation=segmentation) + self.impl.eval() + for w in self.impl.parameters(): + w.requires_grad_(False) + + self.weight = weight + + def forward(self, pred, target): + pred = (pred - IMAGENET_MEAN.to(pred)) / IMAGENET_STD.to(pred) + target = (target - IMAGENET_MEAN.to(target)) / IMAGENET_STD.to(target) + + pred_feats = self.impl(pred, return_feature_maps=True) + target_feats = self.impl(target, return_feature_maps=True) + + result = torch.stack([F.mse_loss(cur_pred, cur_target) + for cur_pred, cur_target + in zip(pred_feats, target_feats)]).sum() * self.weight + return result diff --git a/lama/saicinpainting/training/losses/segmentation.py b/lama/saicinpainting/training/losses/segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..3d4a9f94eaae84722db584277dbbf9bc41ede357 --- /dev/null +++ b/lama/saicinpainting/training/losses/segmentation.py @@ -0,0 +1,43 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .constants import weights as constant_weights + + +class CrossEntropy2d(nn.Module): + def __init__(self, reduction="mean", ignore_label=255, weights=None, *args, **kwargs): + """ + weight (Tensor, optional): a manual rescaling weight given to each class. 
+ If given, has to be a Tensor of size "nclasses" + """ + super(CrossEntropy2d, self).__init__() + self.reduction = reduction + self.ignore_label = ignore_label + self.weights = weights + if self.weights is not None: + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + self.weights = torch.FloatTensor(constant_weights[weights]).to(device) + + def forward(self, predict, target): + """ + Args: + predict:(n, c, h, w) + target:(n, 1, h, w) + """ + target = target.long() + assert not target.requires_grad + assert predict.dim() == 4, "{0}".format(predict.size()) + assert target.dim() == 4, "{0}".format(target.size()) + assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0)) + assert target.size(1) == 1, "{0}".format(target.size(1)) + assert predict.size(2) == target.size(2), "{0} vs {1} ".format(predict.size(2), target.size(2)) + assert predict.size(3) == target.size(3), "{0} vs {1} ".format(predict.size(3), target.size(3)) + target = target.squeeze(1) + n, c, h, w = predict.size() + target_mask = (target >= 0) * (target != self.ignore_label) + target = target[target_mask] + predict = predict.transpose(1, 2).transpose(2, 3).contiguous() + predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c) + loss = F.cross_entropy(predict, target, weight=self.weights, reduction=self.reduction) + return loss diff --git a/lama/saicinpainting/training/losses/style_loss.py b/lama/saicinpainting/training/losses/style_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..0bb42d7fbc5d17a47bec7365889868505f5fdfb5 --- /dev/null +++ b/lama/saicinpainting/training/losses/style_loss.py @@ -0,0 +1,155 @@ +import torch +import torch.nn as nn +import torchvision.models as models + + +class PerceptualLoss(nn.Module): + r""" + Perceptual loss, VGG-based + https://arxiv.org/abs/1603.08155 + https://github.com/dxyang/StyleTransfer/blob/master/utils.py + """ + + def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]): + super(PerceptualLoss, self).__init__() + self.add_module('vgg', VGG19()) + self.criterion = torch.nn.L1Loss() + self.weights = weights + + def __call__(self, x, y): + # Compute features + x_vgg, y_vgg = self.vgg(x), self.vgg(y) + + content_loss = 0.0 + content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1']) + content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1']) + content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1']) + content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1']) + content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1']) + + + return content_loss + + +class VGG19(torch.nn.Module): + def __init__(self): + super(VGG19, self).__init__() + features = models.vgg19(pretrained=True).features + self.relu1_1 = torch.nn.Sequential() + self.relu1_2 = torch.nn.Sequential() + + self.relu2_1 = torch.nn.Sequential() + self.relu2_2 = torch.nn.Sequential() + + self.relu3_1 = torch.nn.Sequential() + self.relu3_2 = torch.nn.Sequential() + self.relu3_3 = torch.nn.Sequential() + self.relu3_4 = torch.nn.Sequential() + + self.relu4_1 = torch.nn.Sequential() + self.relu4_2 = torch.nn.Sequential() + self.relu4_3 = torch.nn.Sequential() + self.relu4_4 = torch.nn.Sequential() + + self.relu5_1 = torch.nn.Sequential() + self.relu5_2 = torch.nn.Sequential() + self.relu5_3 = torch.nn.Sequential() + self.relu5_4 = torch.nn.Sequential() + + for x in range(2): + 
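# torchvision's vgg19(...).features is a flat nn.Sequential; the slices below copy its layers into named stages (indices 0-1 -> relu1_1, 2-3 -> relu1_2, 4-6 -> relu2_1, ..., 34-35 -> relu5_4, each ending at the named ReLU). + 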
self.relu1_1.add_module(str(x), features[x]) + + for x in range(2, 4): + self.relu1_2.add_module(str(x), features[x]) + + for x in range(4, 7): + self.relu2_1.add_module(str(x), features[x]) + + for x in range(7, 9): + self.relu2_2.add_module(str(x), features[x]) + + for x in range(9, 12): + self.relu3_1.add_module(str(x), features[x]) + + for x in range(12, 14): + self.relu3_2.add_module(str(x), features[x]) + + for x in range(14, 16): + self.relu3_3.add_module(str(x), features[x]) + + for x in range(16, 18): + self.relu3_4.add_module(str(x), features[x]) + + for x in range(18, 21): + self.relu4_1.add_module(str(x), features[x]) + + for x in range(21, 23): + self.relu4_2.add_module(str(x), features[x]) + + for x in range(23, 25): + self.relu4_3.add_module(str(x), features[x]) + + for x in range(25, 27): + self.relu4_4.add_module(str(x), features[x]) + + for x in range(27, 30): + self.relu5_1.add_module(str(x), features[x]) + + for x in range(30, 32): + self.relu5_2.add_module(str(x), features[x]) + + for x in range(32, 34): + self.relu5_3.add_module(str(x), features[x]) + + for x in range(34, 36): + self.relu5_4.add_module(str(x), features[x]) + + # don't need the gradients, just want the features + for param in self.parameters(): + param.requires_grad = False + + def forward(self, x): + relu1_1 = self.relu1_1(x) + relu1_2 = self.relu1_2(relu1_1) + + relu2_1 = self.relu2_1(relu1_2) + relu2_2 = self.relu2_2(relu2_1) + + relu3_1 = self.relu3_1(relu2_2) + relu3_2 = self.relu3_2(relu3_1) + relu3_3 = self.relu3_3(relu3_2) + relu3_4 = self.relu3_4(relu3_3) + + relu4_1 = self.relu4_1(relu3_4) + relu4_2 = self.relu4_2(relu4_1) + relu4_3 = self.relu4_3(relu4_2) + relu4_4 = self.relu4_4(relu4_3) + + relu5_1 = self.relu5_1(relu4_4) + relu5_2 = self.relu5_2(relu5_1) + relu5_3 = self.relu5_3(relu5_2) + relu5_4 = self.relu5_4(relu5_3) + + out = { + 'relu1_1': relu1_1, + 'relu1_2': relu1_2, + + 'relu2_1': relu2_1, + 'relu2_2': relu2_2, + + 'relu3_1': relu3_1, + 'relu3_2': relu3_2, + 'relu3_3': relu3_3, + 'relu3_4': relu3_4, + + 'relu4_1': relu4_1, + 'relu4_2': relu4_2, + 'relu4_3': relu4_3, + 'relu4_4': relu4_4, + + 'relu5_1': relu5_1, + 'relu5_2': relu5_2, + 'relu5_3': relu5_3, + 'relu5_4': relu5_4, + } + return out diff --git a/lama/saicinpainting/training/modules/__init__.py b/lama/saicinpainting/training/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..08e58bcc7e460f55fae1824e26cbf2a5e94e76e0 --- /dev/null +++ b/lama/saicinpainting/training/modules/__init__.py @@ -0,0 +1,31 @@ +import logging + +from ....saicinpainting.training.modules.ffc import FFCResNetGenerator +from ....saicinpainting.training.modules.pix2pixhd import GlobalGenerator, MultiDilatedGlobalGenerator, \ + NLayerDiscriminator, MultidilatedNLayerDiscriminator + +def make_generator(config, kind, **kwargs): + logging.info(f'Make generator {kind}') + + if kind == 'pix2pixhd_multidilated': + return MultiDilatedGlobalGenerator(**kwargs) + + if kind == 'pix2pixhd_global': + return GlobalGenerator(**kwargs) + + if kind == 'ffc_resnet': + return FFCResNetGenerator(**kwargs) + + raise ValueError(f'Unknown generator kind {kind}') + + +def make_discriminator(kind, **kwargs): + logging.info(f'Make discriminator {kind}') + + if kind == 'pix2pixhd_nlayer_multidilated': + return MultidilatedNLayerDiscriminator(**kwargs) + + if kind == 'pix2pixhd_nlayer': + return NLayerDiscriminator(**kwargs) + + raise ValueError(f'Unknown discriminator kind {kind}') diff --git a/lama/saicinpainting/training/modules/base.py 
b/lama/saicinpainting/training/modules/base.py new file mode 100644 index 0000000000000000000000000000000000000000..2bb07d853331ec60a655029ba2c1cfc4c7d6064b --- /dev/null +++ b/lama/saicinpainting/training/modules/base.py @@ -0,0 +1,80 @@ +import abc +from typing import Tuple, List + +import torch +import torch.nn as nn + +from ....saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv +from ....saicinpainting.training.modules.multidilated_conv import MultidilatedConv + + +class BaseDiscriminator(nn.Module): + @abc.abstractmethod + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: + """ + Predict scores and get intermediate activations. Useful for feature matching loss + :return tuple (scores, list of intermediate activations) + """ + raise NotImplementedError() + + +def get_conv_block_ctor(kind='default'): + if not isinstance(kind, str): + return kind + if kind == 'default': + return nn.Conv2d + if kind == 'depthwise': + return DepthWiseSeperableConv + if kind == 'multidilated': + return MultidilatedConv + raise ValueError(f'Unknown convolutional block kind {kind}') + + +def get_norm_layer(kind='bn'): + if not isinstance(kind, str): + return kind + if kind == 'bn': + return nn.BatchNorm2d + if kind == 'in': + return nn.InstanceNorm2d + raise ValueError(f'Unknown norm block kind {kind}') + + +def get_activation(kind='tanh'): + if kind == 'tanh': + return nn.Tanh() + if kind == 'sigmoid': + return nn.Sigmoid() + if kind is False: + return nn.Identity() + raise ValueError(f'Unknown activation kind {kind}') + + +class SimpleMultiStepGenerator(nn.Module): + def __init__(self, steps: List[nn.Module]): + super().__init__() + self.steps = nn.ModuleList(steps) + + def forward(self, x): + cur_in = x + outs = [] + for step in self.steps: + cur_out = step(cur_in) + outs.append(cur_out) + cur_in = torch.cat((cur_in, cur_out), dim=1) + return torch.cat(outs[::-1], dim=1) + +def deconv_factory(kind, ngf, mult, norm_layer, activation, max_features): + if kind == 'convtranspose': + return [nn.ConvTranspose2d(min(max_features, ngf * mult), + min(max_features, int(ngf * mult / 2)), + kernel_size=3, stride=2, padding=1, output_padding=1), + norm_layer(min(max_features, int(ngf * mult / 2))), activation] + elif kind == 'bilinear': + return [nn.Upsample(scale_factor=2, mode='bilinear'), + DepthWiseSeperableConv(min(max_features, ngf * mult), + min(max_features, int(ngf * mult / 2)), + kernel_size=3, stride=1, padding=1), + norm_layer(min(max_features, int(ngf * mult / 2))), activation] + else: + raise Exception(f"Invalid deconv kind: {kind}") \ No newline at end of file diff --git a/lama/saicinpainting/training/modules/depthwise_sep_conv.py b/lama/saicinpainting/training/modules/depthwise_sep_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..83dd15c3df1d9f40baf0091a373fa224532c9ddd --- /dev/null +++ b/lama/saicinpainting/training/modules/depthwise_sep_conv.py @@ -0,0 +1,17 @@ +import torch +import torch.nn as nn + +class DepthWiseSeperableConv(nn.Module): + def __init__(self, in_dim, out_dim, *args, **kwargs): + super().__init__() + if 'groups' in kwargs: + # ignoring groups for Depthwise Sep Conv + del kwargs['groups'] + + self.depthwise = nn.Conv2d(in_dim, in_dim, *args, groups=in_dim, **kwargs) + self.pointwise = nn.Conv2d(in_dim, out_dim, kernel_size=1) + + def forward(self, x): + out = self.depthwise(x) + out = self.pointwise(out) + return out \ No newline at end of file diff --git 
a/lama/saicinpainting/training/modules/fake_fakes.py b/lama/saicinpainting/training/modules/fake_fakes.py new file mode 100644 index 0000000000000000000000000000000000000000..45c4ad559cef2730b771a709197e00ae1c87683c --- /dev/null +++ b/lama/saicinpainting/training/modules/fake_fakes.py @@ -0,0 +1,47 @@ +import torch +from kornia import SamplePadding +from kornia.augmentation import RandomAffine, CenterCrop + + +class FakeFakesGenerator: + def __init__(self, aug_proba=0.5, img_aug_degree=30, img_aug_translate=0.2): + self.grad_aug = RandomAffine(degrees=360, + translate=0.2, + padding_mode=SamplePadding.REFLECTION, + keepdim=False, + p=1) + self.img_aug = RandomAffine(degrees=img_aug_degree, + translate=img_aug_translate, + padding_mode=SamplePadding.REFLECTION, + keepdim=True, + p=1) + self.aug_proba = aug_proba + + def __call__(self, input_images, masks): + blend_masks = self._fill_masks_with_gradient(masks) + blend_target = self._make_blend_target(input_images) + result = input_images * (1 - blend_masks) + blend_target * blend_masks + return result, blend_masks + + def _make_blend_target(self, input_images): + batch_size = input_images.shape[0] + permuted = input_images[torch.randperm(batch_size)] + augmented = self.img_aug(input_images) + is_aug = (torch.rand(batch_size, device=input_images.device)[:, None, None, None] < self.aug_proba).float() + result = augmented * is_aug + permuted * (1 - is_aug) + return result + + def _fill_masks_with_gradient(self, masks): + batch_size, _, height, width = masks.shape + grad = torch.linspace(0, 1, steps=width * 2, device=masks.device, dtype=masks.dtype) \ + .view(1, 1, 1, -1).expand(batch_size, 1, height * 2, width * 2) + grad = self.grad_aug(grad) + grad = CenterCrop((height, width))(grad) + grad *= masks + + grad_for_min = grad + (1 - masks) * 10 + grad -= grad_for_min.view(batch_size, -1).min(-1).values[:, None, None, None] + grad /= grad.view(batch_size, -1).max(-1).values[:, None, None, None] + 1e-6 + grad.clamp_(min=0, max=1) + + return grad diff --git a/lama/saicinpainting/training/modules/ffc.py b/lama/saicinpainting/training/modules/ffc.py new file mode 100644 index 0000000000000000000000000000000000000000..acbbc9b44524b42dd3bd996dec2608d2242fb228 --- /dev/null +++ b/lama/saicinpainting/training/modules/ffc.py @@ -0,0 +1,434 @@ +# Fast Fourier Convolution NeurIPS 2020 +# original implementation https://github.com/pkumivision/FFC/blob/main/model_zoo/ffc.py +# paper https://proceedings.neurips.cc/paper/2020/file/2fd5d41ec6cfab47e32164d5624269b1-Paper.pdf + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +import torch.fft +from ....saicinpainting.training.modules.base import get_activation, BaseDiscriminator +from ....saicinpainting.training.modules.spatial_transform import LearnableSpatialTransformWrapper +from ....saicinpainting.training.modules.squeeze_excitation import SELayer +from ....saicinpainting.utils import get_shape + + +class FFCSE_block(nn.Module): + + def __init__(self, channels, ratio_g): + super(FFCSE_block, self).__init__() + in_cg = int(channels * ratio_g) + in_cl = channels - in_cg + r = 16 + + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.conv1 = nn.Conv2d(channels, channels // r, + kernel_size=1, bias=True) + self.relu1 = nn.ReLU(inplace=True) + self.conv_a2l = None if in_cl == 0 else nn.Conv2d( + channels // r, in_cl, kernel_size=1, bias=True) + self.conv_a2g = None if in_cg == 0 else nn.Conv2d( + channels // r, in_cg, kernel_size=1, bias=True) + self.sigmoid = 
nn.Sigmoid() + + def forward(self, x): + x = x if type(x) is tuple else (x, 0) + id_l, id_g = x + + x = id_l if type(id_g) is int else torch.cat([id_l, id_g], dim=1) + x = self.avgpool(x) + x = self.relu1(self.conv1(x)) + + x_l = 0 if self.conv_a2l is None else id_l * \ + self.sigmoid(self.conv_a2l(x)) + x_g = 0 if self.conv_a2g is None else id_g * \ + self.sigmoid(self.conv_a2g(x)) + return x_l, x_g + + +class FourierUnit(nn.Module): + + def __init__(self, in_channels, out_channels, groups=1, spatial_scale_factor=None, spatial_scale_mode='bilinear', + spectral_pos_encoding=False, use_se=False, se_kwargs=None, ffc3d=False, fft_norm='ortho'): + # bn_layer not used + super(FourierUnit, self).__init__() + self.groups = groups + + self.conv_layer = torch.nn.Conv2d(in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0), + out_channels=out_channels * 2, + kernel_size=1, stride=1, padding=0, groups=self.groups, bias=False) + self.bn = torch.nn.BatchNorm2d(out_channels * 2) + self.relu = torch.nn.ReLU(inplace=True) + + # squeeze and excitation block + self.use_se = use_se + if use_se: + if se_kwargs is None: + se_kwargs = {} + self.se = SELayer(self.conv_layer.in_channels, **se_kwargs) + + self.spatial_scale_factor = spatial_scale_factor + self.spatial_scale_mode = spatial_scale_mode + self.spectral_pos_encoding = spectral_pos_encoding + self.ffc3d = ffc3d + self.fft_norm = fft_norm + + def forward(self, x): + batch = x.shape[0] + + if self.spatial_scale_factor is not None: + orig_size = x.shape[-2:] + x = F.interpolate(x, scale_factor=self.spatial_scale_factor, mode=self.spatial_scale_mode, align_corners=False) + + r_size = x.size() + # (batch, c, h, w/2+1, 2) + fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1) + ffted = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm) + ffted = torch.stack((ffted.real, ffted.imag), dim=-1) + ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1) + ffted = ffted.view((batch, -1,) + ffted.size()[3:]) + + if self.spectral_pos_encoding: + height, width = ffted.shape[-2:] + coords_vert = torch.linspace(0, 1, height)[None, None, :, None].expand(batch, 1, height, width).to(ffted) + coords_hor = torch.linspace(0, 1, width)[None, None, None, :].expand(batch, 1, height, width).to(ffted) + ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1) + + if self.use_se: + ffted = self.se(ffted) + + ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1) + ffted = self.relu(self.bn(ffted)) + + ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute( + 0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2) + ffted = torch.complex(ffted[..., 0], ffted[..., 1]) + + ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:] + output = torch.fft.irfftn(ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm) + + if self.spatial_scale_factor is not None: + output = F.interpolate(output, size=orig_size, mode=self.spatial_scale_mode, align_corners=False) + + return output + + +class SpectralTransform(nn.Module): + + def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True, **fu_kwargs): + # bn_layer not used + super(SpectralTransform, self).__init__() + self.enable_lfu = enable_lfu + if stride == 2: + self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2) + else: + self.downsample = nn.Identity() + + self.stride = stride + self.conv1 = nn.Sequential( + nn.Conv2d(in_channels, out_channels // + 2, kernel_size=1, groups=groups, bias=False), + nn.BatchNorm2d(out_channels // 2), + nn.ReLU(inplace=True) + ) + 
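# fu applies a FourierUnit to the full (downsampled, channel-reduced) feature map; when enable_lfu is set, lfu applies a second FourierUnit to a 2x2 spatial split of the first quarter of the channels, whose tiled output is added back in forward() before conv2 projects the sum to out_channels. + 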
self.fu = FourierUnit( + out_channels // 2, out_channels // 2, groups, **fu_kwargs) + if self.enable_lfu: + self.lfu = FourierUnit( + out_channels // 2, out_channels // 2, groups) + self.conv2 = torch.nn.Conv2d( + out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False) + + def forward(self, x): + + x = self.downsample(x) + x = self.conv1(x) + output = self.fu(x) + + if self.enable_lfu: + n, c, h, w = x.shape + split_no = 2 + split_s = h // split_no + xs = torch.cat(torch.split( + x[:, :c // 4], split_s, dim=-2), dim=1).contiguous() + xs = torch.cat(torch.split(xs, split_s, dim=-1), + dim=1).contiguous() + xs = self.lfu(xs) + xs = xs.repeat(1, 1, split_no, split_no).contiguous() + else: + xs = 0 + + output = self.conv2(x + output + xs) + + return output + + +class FFC(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, + ratio_gin, ratio_gout, stride=1, padding=0, + dilation=1, groups=1, bias=False, enable_lfu=True, + padding_type='reflect', gated=False, **spectral_kwargs): + super(FFC, self).__init__() + + assert stride == 1 or stride == 2, "Stride should be 1 or 2." + self.stride = stride + + in_cg = int(in_channels * ratio_gin) + in_cl = in_channels - in_cg + out_cg = int(out_channels * ratio_gout) + out_cl = out_channels - out_cg + #groups_g = 1 if groups == 1 else int(groups * ratio_gout) + #groups_l = 1 if groups == 1 else groups - groups_g + + self.ratio_gin = ratio_gin + self.ratio_gout = ratio_gout + self.global_in_num = in_cg + + module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d + self.convl2l = module(in_cl, out_cl, kernel_size, + stride, padding, dilation, groups, bias, padding_mode=padding_type) + module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d + self.convl2g = module(in_cl, out_cg, kernel_size, + stride, padding, dilation, groups, bias, padding_mode=padding_type) + module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d + self.convg2l = module(in_cg, out_cl, kernel_size, + stride, padding, dilation, groups, bias, padding_mode=padding_type) + module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform + self.convg2g = module( + in_cg, out_cg, stride, 1 if groups == 1 else groups // 2, enable_lfu, **spectral_kwargs) + + self.gated = gated + module = nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d + self.gate = module(in_channels, 2, 1) + + def forward(self, x): + x_l, x_g = x if type(x) is tuple else (x, 0) + out_xl, out_xg = 0, 0 + + if self.gated: + total_input_parts = [x_l] + if torch.is_tensor(x_g): + total_input_parts.append(x_g) + total_input = torch.cat(total_input_parts, dim=1) + + gates = torch.sigmoid(self.gate(total_input)) + g2l_gate, l2g_gate = gates.chunk(2, dim=1) + else: + g2l_gate, l2g_gate = 1, 1 + + if self.ratio_gout != 1: + out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate + if self.ratio_gout != 0: + out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g) + + return out_xl, out_xg + + +class FFC_BN_ACT(nn.Module): + + def __init__(self, in_channels, out_channels, + kernel_size, ratio_gin, ratio_gout, + stride=1, padding=0, dilation=1, groups=1, bias=False, + norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity, + padding_type='reflect', + enable_lfu=True, **kwargs): + super(FFC_BN_ACT, self).__init__() + self.ffc = FFC(in_channels, out_channels, kernel_size, + ratio_gin, ratio_gout, stride, padding, dilation, + groups, bias, enable_lfu, padding_type=padding_type, **kwargs) + lnorm = nn.Identity if ratio_gout == 1 else 
norm_layer + gnorm = nn.Identity if ratio_gout == 0 else norm_layer + global_channels = int(out_channels * ratio_gout) + self.bn_l = lnorm(out_channels - global_channels) + self.bn_g = gnorm(global_channels) + + lact = nn.Identity if ratio_gout == 1 else activation_layer + gact = nn.Identity if ratio_gout == 0 else activation_layer + self.act_l = lact(inplace=True) + self.act_g = gact(inplace=True) + + def forward(self, x): + x_l, x_g = self.ffc(x) + x_l = self.act_l(self.bn_l(x_l)) + x_g = self.act_g(self.bn_g(x_g)) + return x_l, x_g + + +class FFCResnetBlock(nn.Module): + def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1, + spatial_transform_kwargs=None, inline=False, **conv_kwargs): + super().__init__() + self.conv1 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, + norm_layer=norm_layer, + activation_layer=activation_layer, + padding_type=padding_type, + **conv_kwargs) + self.conv2 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, + norm_layer=norm_layer, + activation_layer=activation_layer, + padding_type=padding_type, + **conv_kwargs) + if spatial_transform_kwargs is not None: + self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs) + self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs) + self.inline = inline + + def forward(self, x): + if self.inline: + x_l, x_g = x[:, :-self.conv1.ffc.global_in_num], x[:, -self.conv1.ffc.global_in_num:] + else: + x_l, x_g = x if type(x) is tuple else (x, 0) + + id_l, id_g = x_l, x_g + + x_l, x_g = self.conv1((x_l, x_g)) + x_l, x_g = self.conv2((x_l, x_g)) + + x_l, x_g = id_l + x_l, id_g + x_g + out = x_l, x_g + if self.inline: + out = torch.cat(out, dim=1) + return out + + +class ConcatTupleLayer(nn.Module): + def forward(self, x): + assert isinstance(x, tuple) + x_l, x_g = x + assert torch.is_tensor(x_l) or torch.is_tensor(x_g) + if not torch.is_tensor(x_g): + return x_l + return torch.cat(x, dim=1) + + +class FFCResNetGenerator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, + padding_type='reflect', activation_layer=nn.ReLU, + up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), + init_conv_kwargs={}, downsample_conv_kwargs={}, resnet_conv_kwargs={}, + spatial_transform_layers=None, spatial_transform_kwargs={}, + add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs={}): + assert (n_blocks >= 0) + super().__init__() + + model = [nn.ReflectionPad2d(3), + FFC_BN_ACT(input_nc, ngf, kernel_size=7, padding=0, norm_layer=norm_layer, + activation_layer=activation_layer, **init_conv_kwargs)] + + ### downsample + for i in range(n_downsampling): + mult = 2 ** i + if i == n_downsampling - 1: + cur_conv_kwargs = dict(downsample_conv_kwargs) + cur_conv_kwargs['ratio_gout'] = resnet_conv_kwargs.get('ratio_gin', 0) + else: + cur_conv_kwargs = downsample_conv_kwargs + model += [FFC_BN_ACT(min(max_features, ngf * mult), + min(max_features, ngf * mult * 2), + kernel_size=3, stride=2, padding=1, + norm_layer=norm_layer, + activation_layer=activation_layer, + **cur_conv_kwargs)] + + mult = 2 ** n_downsampling + feats_num_bottleneck = min(max_features, ngf * mult) + + ### resnet blocks + for i in range(n_blocks): + cur_resblock = FFCResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation_layer=activation_layer, + norm_layer=norm_layer, **resnet_conv_kwargs) + if spatial_transform_layers is not None and i in 
spatial_transform_layers: + cur_resblock = LearnableSpatialTransformWrapper(cur_resblock, **spatial_transform_kwargs) + model += [cur_resblock] + + model += [ConcatTupleLayer()] + + ### upsample + for i in range(n_downsampling): + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(min(max_features, ngf * mult), + min(max_features, int(ngf * mult / 2)), + kernel_size=3, stride=2, padding=1, output_padding=1), + up_norm_layer(min(max_features, int(ngf * mult / 2))), + up_activation] + + if out_ffc: + model += [FFCResnetBlock(ngf, padding_type=padding_type, activation_layer=activation_layer, + norm_layer=norm_layer, inline=True, **out_ffc_kwargs)] + + model += [nn.ReflectionPad2d(3), + nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + if add_out_act: + model.append(get_activation('tanh' if add_out_act is True else add_out_act)) + self.model = nn.Sequential(*model) + + def forward(self, input): + return self.model(input) + + +class FFCNLayerDiscriminator(BaseDiscriminator): + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, max_features=512, + init_conv_kwargs={}, conv_kwargs={}): + super().__init__() + self.n_layers = n_layers + + def _act_ctor(inplace=True): + return nn.LeakyReLU(negative_slope=0.2, inplace=inplace) + + kw = 3 + padw = int(np.ceil((kw-1.0)/2)) + sequence = [[FFC_BN_ACT(input_nc, ndf, kernel_size=kw, padding=padw, norm_layer=norm_layer, + activation_layer=_act_ctor, **init_conv_kwargs)]] + + nf = ndf + for n in range(1, n_layers): + nf_prev = nf + nf = min(nf * 2, max_features) + + cur_model = [ + FFC_BN_ACT(nf_prev, nf, + kernel_size=kw, stride=2, padding=padw, + norm_layer=norm_layer, + activation_layer=_act_ctor, + **conv_kwargs) + ] + sequence.append(cur_model) + + nf_prev = nf + nf = min(nf * 2, 512) + + cur_model = [ + FFC_BN_ACT(nf_prev, nf, + kernel_size=kw, stride=1, padding=padw, + norm_layer=norm_layer, + activation_layer=lambda *args, **kwargs: nn.LeakyReLU(*args, negative_slope=0.2, **kwargs), + **conv_kwargs), + ConcatTupleLayer() + ] + sequence.append(cur_model) + + sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] + + for n in range(len(sequence)): + setattr(self, 'model'+str(n), nn.Sequential(*sequence[n])) + + def get_all_activations(self, x): + res = [x] + for n in range(self.n_layers + 2): + model = getattr(self, 'model' + str(n)) + res.append(model(res[-1])) + return res[1:] + + def forward(self, x): + act = self.get_all_activations(x) + feats = [] + for out in act[:-1]: + if isinstance(out, tuple): + if torch.is_tensor(out[1]): + out = torch.cat(out, dim=1) + else: + out = out[0] + feats.append(out) + return act[-1], feats diff --git a/lama/saicinpainting/training/modules/multidilated_conv.py b/lama/saicinpainting/training/modules/multidilated_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..0a3a40e20f171899724f7c2b06c9604f988943d3 --- /dev/null +++ b/lama/saicinpainting/training/modules/multidilated_conv.py @@ -0,0 +1,98 @@ +import torch +import torch.nn as nn +import random +from ....saicinpainting.training.modules.depthwise_sep_conv import DepthWiseSeperableConv + +class MultidilatedConv(nn.Module): + def __init__(self, in_dim, out_dim, kernel_size, dilation_num=3, comb_mode='sum', equal_dim=True, + shared_weights=False, padding=1, min_dilation=1, shuffle_in_channels=False, use_depthwise=False, **kwargs): + super().__init__() + convs = [] + self.equal_dim = equal_dim + assert comb_mode in ('cat_out', 'sum', 'cat_in', 'cat_both'), comb_mode + if comb_mode in 
('cat_out', 'cat_both'): + self.cat_out = True + if equal_dim: + assert out_dim % dilation_num == 0 + out_dims = [out_dim // dilation_num] * dilation_num + self.index = sum([[i + j * (out_dims[0]) for j in range(dilation_num)] for i in range(out_dims[0])], []) + else: + out_dims = [out_dim // 2 ** (i + 1) for i in range(dilation_num - 1)] + out_dims.append(out_dim - sum(out_dims)) + index = [] + starts = [0] + out_dims[:-1] + lengths = [out_dims[i] // out_dims[-1] for i in range(dilation_num)] + for i in range(out_dims[-1]): + for j in range(dilation_num): + index += list(range(starts[j], starts[j] + lengths[j])) + starts[j] += lengths[j] + self.index = index + assert(len(index) == out_dim) + self.out_dims = out_dims + else: + self.cat_out = False + self.out_dims = [out_dim] * dilation_num + + if comb_mode in ('cat_in', 'cat_both'): + if equal_dim: + assert in_dim % dilation_num == 0 + in_dims = [in_dim // dilation_num] * dilation_num + else: + in_dims = [in_dim // 2 ** (i + 1) for i in range(dilation_num - 1)] + in_dims.append(in_dim - sum(in_dims)) + self.in_dims = in_dims + self.cat_in = True + else: + self.cat_in = False + self.in_dims = [in_dim] * dilation_num + + conv_type = DepthWiseSeperableConv if use_depthwise else nn.Conv2d + dilation = min_dilation + for i in range(dilation_num): + if isinstance(padding, int): + cur_padding = padding * dilation + else: + cur_padding = padding[i] + convs.append(conv_type( + self.in_dims[i], self.out_dims[i], kernel_size, padding=cur_padding, dilation=dilation, **kwargs + )) + if i > 0 and shared_weights: + convs[-1].weight = convs[0].weight + convs[-1].bias = convs[0].bias + dilation *= 2 + self.convs = nn.ModuleList(convs) + + self.shuffle_in_channels = shuffle_in_channels + if self.shuffle_in_channels: + # shuffle list as shuffling of tensors is nondeterministic + in_channels_permute = list(range(in_dim)) + random.shuffle(in_channels_permute) + # save as buffer so it is saved and loaded with checkpoint + self.register_buffer('in_channels_permute', torch.tensor(in_channels_permute)) + + def forward(self, x): + if self.shuffle_in_channels: + x = x[:, self.in_channels_permute] + + outs = [] + if self.cat_in: + if self.equal_dim: + x = x.chunk(len(self.convs), dim=1) + else: + new_x = [] + start = 0 + for dim in self.in_dims: + new_x.append(x[:, start:start+dim]) + start += dim + x = new_x + for i, conv in enumerate(self.convs): + if self.cat_in: + input = x[i] + else: + input = x + outs.append(conv(input)) + if self.cat_out: + out = torch.cat(outs, dim=1)[:, self.index] + else: + out = sum(outs) + return out diff --git a/lama/saicinpainting/training/modules/multiscale.py b/lama/saicinpainting/training/modules/multiscale.py new file mode 100644 index 0000000000000000000000000000000000000000..65f0a54925593e9da8106bfc6d65a4098ce001d7 --- /dev/null +++ b/lama/saicinpainting/training/modules/multiscale.py @@ -0,0 +1,244 @@ +from typing import List, Tuple, Union, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from saicinpainting.training.modules.base import get_conv_block_ctor, get_activation +from saicinpainting.training.modules.pix2pixhd import ResnetBlock + + +class ResNetHead(nn.Module): + def __init__(self, input_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, + padding_type='reflect', conv_kind='default', activation=nn.ReLU(True)): + assert (n_blocks >= 0) + super(ResNetHead, self).__init__() + + conv_layer = get_conv_block_ctor(conv_kind) + + model = [nn.ReflectionPad2d(3), + 
conv_layer(input_nc, ngf, kernel_size=7, padding=0), + norm_layer(ngf), + activation] + + ### downsample + for i in range(n_downsampling): + mult = 2 ** i + model += [conv_layer(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1), + norm_layer(ngf * mult * 2), + activation] + + mult = 2 ** n_downsampling + + ### resnet blocks + for i in range(n_blocks): + model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer, + conv_kind=conv_kind)] + + self.model = nn.Sequential(*model) + + def forward(self, input): + return self.model(input) + + +class ResNetTail(nn.Module): + def __init__(self, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, + padding_type='reflect', conv_kind='default', activation=nn.ReLU(True), + up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False, out_extra_layers_n=0, + add_in_proj=None): + assert (n_blocks >= 0) + super(ResNetTail, self).__init__() + + mult = 2 ** n_downsampling + + model = [] + + if add_in_proj is not None: + model.append(nn.Conv2d(add_in_proj, ngf * mult, kernel_size=1)) + + ### resnet blocks + for i in range(n_blocks): + model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer, + conv_kind=conv_kind)] + + ### upsample + for i in range(n_downsampling): + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, + output_padding=1), + up_norm_layer(int(ngf * mult / 2)), + up_activation] + self.model = nn.Sequential(*model) + + out_layers = [] + for _ in range(out_extra_layers_n): + out_layers += [nn.Conv2d(ngf, ngf, kernel_size=1, padding=0), + up_norm_layer(ngf), + up_activation] + out_layers += [nn.ReflectionPad2d(3), + nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + + if add_out_act: + out_layers.append(get_activation('tanh' if add_out_act is True else add_out_act)) + + self.out_proj = nn.Sequential(*out_layers) + + def forward(self, input, return_last_act=False): + features = self.model(input) + out = self.out_proj(features) + if return_last_act: + return out, features + else: + return out + + +class MultiscaleResNet(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=2, n_blocks_head=2, n_blocks_tail=6, n_scales=3, + norm_layer=nn.BatchNorm2d, padding_type='reflect', conv_kind='default', activation=nn.ReLU(True), + up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), add_out_act=False, out_extra_layers_n=0, + out_cumulative=False, return_only_hr=False): + super().__init__() + + self.heads = nn.ModuleList([ResNetHead(input_nc, ngf=ngf, n_downsampling=n_downsampling, + n_blocks=n_blocks_head, norm_layer=norm_layer, padding_type=padding_type, + conv_kind=conv_kind, activation=activation) + for i in range(n_scales)]) + tail_in_feats = ngf * (2 ** n_downsampling) + ngf + self.tails = nn.ModuleList([ResNetTail(output_nc, + ngf=ngf, n_downsampling=n_downsampling, + n_blocks=n_blocks_tail, norm_layer=norm_layer, padding_type=padding_type, + conv_kind=conv_kind, activation=activation, up_norm_layer=up_norm_layer, + up_activation=up_activation, add_out_act=add_out_act, + out_extra_layers_n=out_extra_layers_n, + add_in_proj=None if (i == n_scales - 1) else tail_in_feats) + for i in range(n_scales)]) + + self.out_cumulative = out_cumulative + self.return_only_hr = return_only_hr + + @property + def num_scales(self): + return len(self.heads) + + def forward(self, ms_inputs: List[torch.Tensor], 
smallest_scales_num: Optional[int] = None) \ + -> Union[torch.Tensor, List[torch.Tensor]]: + """ + :param ms_inputs: List of inputs of different resolutions from HR to LR + :param smallest_scales_num: int or None, number of smallest scales to take at input + :return: Depending on return_only_hr: + True: Only the most HR output + False: List of outputs of different resolutions from HR to LR + """ + if smallest_scales_num is None: + assert len(self.heads) == len(ms_inputs), (len(self.heads), len(ms_inputs), smallest_scales_num) + smallest_scales_num = len(self.heads) + else: + assert smallest_scales_num == len(ms_inputs) <= len(self.heads), (len(self.heads), len(ms_inputs), smallest_scales_num) + + cur_heads = self.heads[-smallest_scales_num:] + ms_features = [cur_head(cur_inp) for cur_head, cur_inp in zip(cur_heads, ms_inputs)] + + all_outputs = [] + prev_tail_features = None + for i in range(len(ms_features)): + scale_i = -i - 1 + + cur_tail_input = ms_features[-i - 1] + if prev_tail_features is not None: + if prev_tail_features.shape != cur_tail_input.shape: + prev_tail_features = F.interpolate(prev_tail_features, size=cur_tail_input.shape[2:], + mode='bilinear', align_corners=False) + cur_tail_input = torch.cat((cur_tail_input, prev_tail_features), dim=1) + + cur_out, cur_tail_feats = self.tails[scale_i](cur_tail_input, return_last_act=True) + + prev_tail_features = cur_tail_feats + all_outputs.append(cur_out) + + if self.out_cumulative: + all_outputs_cum = [all_outputs[0]] + for i in range(1, len(ms_features)): + cur_out = all_outputs[i] + cur_out_cum = cur_out + F.interpolate(all_outputs_cum[-1], size=cur_out.shape[2:], + mode='bilinear', align_corners=False) + all_outputs_cum.append(cur_out_cum) + all_outputs = all_outputs_cum + + if self.return_only_hr: + return all_outputs[-1] + else: + return all_outputs[::-1] + + +class MultiscaleDiscriminatorSimple(nn.Module): + def __init__(self, ms_impl): + super().__init__() + self.ms_impl = nn.ModuleList(ms_impl) + + @property + def num_scales(self): + return len(self.ms_impl) + + def forward(self, ms_inputs: List[torch.Tensor], smallest_scales_num: Optional[int] = None) \ + -> List[Tuple[torch.Tensor, List[torch.Tensor]]]: + """ + :param ms_inputs: List of inputs of different resolutions from HR to LR + :param smallest_scales_num: int or None, number of smallest scales to take at input + :return: List of pairs (prediction, features) for different resolutions from HR to LR + """ + if smallest_scales_num is None: + assert len(self.ms_impl) == len(ms_inputs), (len(self.ms_impl), len(ms_inputs), smallest_scales_num) + smallest_scales_num = len(self.ms_impl) + else: + assert smallest_scales_num == len(ms_inputs) <= len(self.ms_impl), \ + (len(self.ms_impl), len(ms_inputs), smallest_scales_num) + + return [cur_discr(cur_input) for cur_discr, cur_input in zip(self.ms_impl[-smallest_scales_num:], ms_inputs)] + + +class SingleToMultiScaleInputMixin: + def forward(self, x: torch.Tensor) -> List: + orig_height, orig_width = x.shape[2:] + factors = [2 ** i for i in range(self.num_scales)] + ms_inputs = [F.interpolate(x, size=(orig_height // f, orig_width // f), mode='bilinear', align_corners=False) + for f in factors] + return super().forward(ms_inputs) + + +class GeneratorMultiToSingleOutputMixin: + def forward(self, x): + return super().forward(x)[0] + + +class DiscriminatorMultiToSingleOutputMixin: + def forward(self, x): + out_feat_tuples = super().forward(x) + return out_feat_tuples[0][0], [f for _, flist in out_feat_tuples for f in flist] + + +class 
DiscriminatorMultiToSingleOutputStackedMixin: + def __init__(self, *args, return_feats_only_levels=None, **kwargs): + super().__init__(*args, **kwargs) + self.return_feats_only_levels = return_feats_only_levels + + def forward(self, x): + out_feat_tuples = super().forward(x) + outs = [out for out, _ in out_feat_tuples] + scaled_outs = [outs[0]] + [F.interpolate(cur_out, size=outs[0].shape[-2:], + mode='bilinear', align_corners=False) + for cur_out in outs[1:]] + out = torch.cat(scaled_outs, dim=1) + if self.return_feats_only_levels is not None: + feat_lists = [out_feat_tuples[i][1] for i in self.return_feats_only_levels] + else: + feat_lists = [flist for _, flist in out_feat_tuples] + feats = [f for flist in feat_lists for f in flist] + return out, feats + + +class MultiscaleDiscrSingleInput(SingleToMultiScaleInputMixin, DiscriminatorMultiToSingleOutputStackedMixin, MultiscaleDiscriminatorSimple): + pass + + +class MultiscaleResNetSingle(GeneratorMultiToSingleOutputMixin, SingleToMultiScaleInputMixin, MultiscaleResNet): + pass diff --git a/lama/saicinpainting/training/modules/pix2pixhd.py b/lama/saicinpainting/training/modules/pix2pixhd.py new file mode 100644 index 0000000000000000000000000000000000000000..7446aac6d62f3e680ee2f8d0c38cc417b317c597 --- /dev/null +++ b/lama/saicinpainting/training/modules/pix2pixhd.py @@ -0,0 +1,669 @@ +# original: https://github.com/NVIDIA/pix2pixHD/blob/master/models/networks.py +import collections +from functools import partial +import functools +import logging +from collections import defaultdict + +import numpy as np +import torch.nn as nn + +from ....saicinpainting.training.modules.base import BaseDiscriminator, deconv_factory, get_conv_block_ctor, get_norm_layer, get_activation +from ....saicinpainting.training.modules.ffc import FFCResnetBlock +from ....saicinpainting.training.modules.multidilated_conv import MultidilatedConv + +class DotDict(defaultdict): + # https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary + """dot.notation access to dictionary attributes""" + __getattr__ = defaultdict.get + __setattr__ = defaultdict.__setitem__ + __delattr__ = defaultdict.__delitem__ + +class Identity(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return x + + +class ResnetBlock(nn.Module): + def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False, conv_kind='default', + dilation=1, in_dim=None, groups=1, second_dilation=None): + super(ResnetBlock, self).__init__() + self.in_dim = in_dim + self.dim = dim + if second_dilation is None: + second_dilation = dilation + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout, + conv_kind=conv_kind, dilation=dilation, in_dim=in_dim, groups=groups, + second_dilation=second_dilation) + + if self.in_dim is not None: + self.input_conv = nn.Conv2d(in_dim, dim, 1) + + self.out_channnels = dim + + def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout, conv_kind='default', + dilation=1, in_dim=None, groups=1, second_dilation=1): + conv_layer = get_conv_block_ctor(conv_kind) + + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(dilation)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(dilation)] + elif padding_type == 'zero': + p = dilation + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + if in_dim is None: + in_dim = dim + + conv_block += 
[conv_layer(in_dim, dim, kernel_size=3, padding=p, dilation=dilation), + norm_layer(dim), + activation] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(second_dilation)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(second_dilation)] + elif padding_type == 'zero': + p = second_dilation + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [conv_layer(dim, dim, kernel_size=3, padding=p, dilation=second_dilation, groups=groups), + norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + x_before = x + if self.in_dim is not None: + x = self.input_conv(x) + out = x + self.conv_block(x_before) + return out + +class ResnetBlock5x5(nn.Module): + def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False, conv_kind='default', + dilation=1, in_dim=None, groups=1, second_dilation=None): + super(ResnetBlock5x5, self).__init__() + self.in_dim = in_dim + self.dim = dim + if second_dilation is None: + second_dilation = dilation + self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout, + conv_kind=conv_kind, dilation=dilation, in_dim=in_dim, groups=groups, + second_dilation=second_dilation) + + if self.in_dim is not None: + self.input_conv = nn.Conv2d(in_dim, dim, 1) + + self.out_channnels = dim + + def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout, conv_kind='default', + dilation=1, in_dim=None, groups=1, second_dilation=1): + conv_layer = get_conv_block_ctor(conv_kind) + + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(dilation * 2)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(dilation * 2)] + elif padding_type == 'zero': + p = dilation * 2 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + if in_dim is None: + in_dim = dim + + conv_block += [conv_layer(in_dim, dim, kernel_size=5, padding=p, dilation=dilation), + norm_layer(dim), + activation] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(second_dilation * 2)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(second_dilation * 2)] + elif padding_type == 'zero': + p = second_dilation * 2 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [conv_layer(dim, dim, kernel_size=5, padding=p, dilation=second_dilation, groups=groups), + norm_layer(dim)] + + return nn.Sequential(*conv_block) + + def forward(self, x): + x_before = x + if self.in_dim is not None: + x = self.input_conv(x) + out = x + self.conv_block(x_before) + return out + + +class MultidilatedResnetBlock(nn.Module): + def __init__(self, dim, padding_type, conv_layer, norm_layer, activation=nn.ReLU(True), use_dropout=False): + super().__init__() + self.conv_block = self.build_conv_block(dim, padding_type, conv_layer, norm_layer, activation, use_dropout) + + def build_conv_block(self, dim, padding_type, conv_layer, norm_layer, activation, use_dropout, dilation=1): + conv_block = [] + conv_block += [conv_layer(dim, dim, kernel_size=3, padding_mode=padding_type), + norm_layer(dim), + activation] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + conv_block += [conv_layer(dim, dim, kernel_size=3, padding_mode=padding_type), + norm_layer(dim)] + + 
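+        # the branch assembled above is conv -> norm -> activation (-> optional dropout) -> conv -> norm;
+        # forward() adds the block input back as a residual skip, so no activation follows the second norm here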
return nn.Sequential(*conv_block) + + def forward(self, x): + out = x + self.conv_block(x) + return out + + +class MultiDilatedGlobalGenerator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, + n_blocks=3, norm_layer=nn.BatchNorm2d, + padding_type='reflect', conv_kind='default', + deconv_kind='convtranspose', activation=nn.ReLU(True), + up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True), + add_out_act=True, max_features=1024, multidilation_kwargs={}, + ffc_positions=None, ffc_kwargs={}): + assert (n_blocks >= 0) + super().__init__() + + conv_layer = get_conv_block_ctor(conv_kind) + resnet_conv_layer = functools.partial(get_conv_block_ctor('multidilated'), **multidilation_kwargs) + norm_layer = get_norm_layer(norm_layer) + if affine is not None: + norm_layer = partial(norm_layer, affine=affine) + up_norm_layer = get_norm_layer(up_norm_layer) + if affine is not None: + up_norm_layer = partial(up_norm_layer, affine=affine) + + model = [nn.ReflectionPad2d(3), + conv_layer(input_nc, ngf, kernel_size=7, padding=0), + norm_layer(ngf), + activation] + + identity = Identity() + ### downsample + for i in range(n_downsampling): + mult = 2 ** i + + model += [conv_layer(min(max_features, ngf * mult), + min(max_features, ngf * mult * 2), + kernel_size=3, stride=2, padding=1), + norm_layer(min(max_features, ngf * mult * 2)), + activation] + + mult = 2 ** n_downsampling + feats_num_bottleneck = min(max_features, ngf * mult) + + ### resnet blocks + for i in range(n_blocks): + if ffc_positions is not None and i in ffc_positions: + model += [FFCResnetBlock(feats_num_bottleneck, padding_type, norm_layer, activation_layer=nn.ReLU, + inline=True, **ffc_kwargs)] + model += [MultidilatedResnetBlock(feats_num_bottleneck, padding_type=padding_type, + conv_layer=resnet_conv_layer, activation=activation, + norm_layer=norm_layer)] + + ### upsample + for i in range(n_downsampling): + mult = 2 ** (n_downsampling - i) + model += deconv_factory(deconv_kind, ngf, mult, up_norm_layer, up_activation, max_features) + model += [nn.ReflectionPad2d(3), + nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + if add_out_act: + model.append(get_activation('tanh' if add_out_act is True else add_out_act)) + self.model = nn.Sequential(*model) + + def forward(self, input): + return self.model(input) + +class ConfigGlobalGenerator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, + n_blocks=3, norm_layer=nn.BatchNorm2d, + padding_type='reflect', conv_kind='default', + deconv_kind='convtranspose', activation=nn.ReLU(True), + up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True), + add_out_act=True, max_features=1024, + manual_block_spec=[], + resnet_block_kind='multidilatedresnetblock', + resnet_conv_kind='multidilated', + resnet_dilation=1, + multidilation_kwargs={}): + assert (n_blocks >= 0) + super().__init__() + + conv_layer = get_conv_block_ctor(conv_kind) + resnet_conv_layer = functools.partial(get_conv_block_ctor(resnet_conv_kind), **multidilation_kwargs) + norm_layer = get_norm_layer(norm_layer) + if affine is not None: + norm_layer = partial(norm_layer, affine=affine) + up_norm_layer = get_norm_layer(up_norm_layer) + if affine is not None: + up_norm_layer = partial(up_norm_layer, affine=affine) + + model = [nn.ReflectionPad2d(3), + conv_layer(input_nc, ngf, kernel_size=7, padding=0), + norm_layer(ngf), + activation] + + identity = Identity() + + ### downsample + for i in range(n_downsampling): + mult = 2 ** i + model += 
[conv_layer(min(max_features, ngf * mult), + min(max_features, ngf * mult * 2), + kernel_size=3, stride=2, padding=1), + norm_layer(min(max_features, ngf * mult * 2)), + activation] + + mult = 2 ** n_downsampling + feats_num_bottleneck = min(max_features, ngf * mult) + + if len(manual_block_spec) == 0: + manual_block_spec = [ + DotDict(lambda : None, { + 'n_blocks': n_blocks, + 'use_default': True}) + ] + + ### resnet blocks + for block_spec in manual_block_spec: + def make_and_add_blocks(model, block_spec): + block_spec = DotDict(lambda : None, block_spec) + if not block_spec.use_default: + resnet_conv_layer = functools.partial(get_conv_block_ctor(block_spec.resnet_conv_kind), **block_spec.multidilation_kwargs) + resnet_conv_kind = block_spec.resnet_conv_kind + resnet_block_kind = block_spec.resnet_block_kind + if block_spec.resnet_dilation is not None: + resnet_dilation = block_spec.resnet_dilation + for i in range(block_spec.n_blocks): + if resnet_block_kind == "multidilatedresnetblock": + model += [MultidilatedResnetBlock(feats_num_bottleneck, padding_type=padding_type, + conv_layer=resnet_conv_layer, activation=activation, + norm_layer=norm_layer)] + if resnet_block_kind == "resnetblock": + model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer, + conv_kind=resnet_conv_kind)] + if resnet_block_kind == "resnetblock5x5": + model += [ResnetBlock5x5(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer, + conv_kind=resnet_conv_kind)] + if resnet_block_kind == "resnetblockdwdil": + model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer, + conv_kind=resnet_conv_kind, dilation=resnet_dilation, second_dilation=resnet_dilation)] + make_and_add_blocks(model, block_spec) + + ### upsample + for i in range(n_downsampling): + mult = 2 ** (n_downsampling - i) + model += deconv_factory(deconv_kind, ngf, mult, up_norm_layer, up_activation, max_features) + model += [nn.ReflectionPad2d(3), + nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + if add_out_act: + model.append(get_activation('tanh' if add_out_act is True else add_out_act)) + self.model = nn.Sequential(*model) + + def forward(self, input): + return self.model(input) + + +def make_dil_blocks(dilated_blocks_n, dilation_block_kind, dilated_block_kwargs): + blocks = [] + for i in range(dilated_blocks_n): + if dilation_block_kind == 'simple': + blocks.append(ResnetBlock(**dilated_block_kwargs, dilation=2 ** (i + 1))) + elif dilation_block_kind == 'multi': + blocks.append(MultidilatedResnetBlock(**dilated_block_kwargs)) + else: + raise ValueError(f'dilation_block_kind could not be "{dilation_block_kind}"') + return blocks + + +class GlobalGenerator(nn.Module): + def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, + padding_type='reflect', conv_kind='default', activation=nn.ReLU(True), + up_norm_layer=nn.BatchNorm2d, affine=None, + up_activation=nn.ReLU(True), dilated_blocks_n=0, dilated_blocks_n_start=0, + dilated_blocks_n_middle=0, + add_out_act=True, + max_features=1024, is_resblock_depthwise=False, + ffc_positions=None, ffc_kwargs={}, dilation=1, second_dilation=None, + dilation_block_kind='simple', multidilation_kwargs={}): + assert (n_blocks >= 0) + super().__init__() + + conv_layer = get_conv_block_ctor(conv_kind) + norm_layer = get_norm_layer(norm_layer) + if affine is not None: + norm_layer = partial(norm_layer, affine=affine) + up_norm_layer = 
get_norm_layer(up_norm_layer) + if affine is not None: + up_norm_layer = partial(up_norm_layer, affine=affine) + + if ffc_positions is not None: + ffc_positions = collections.Counter(ffc_positions) + + model = [nn.ReflectionPad2d(3), + conv_layer(input_nc, ngf, kernel_size=7, padding=0), + norm_layer(ngf), + activation] + + identity = Identity() + ### downsample + for i in range(n_downsampling): + mult = 2 ** i + + model += [conv_layer(min(max_features, ngf * mult), + min(max_features, ngf * mult * 2), + kernel_size=3, stride=2, padding=1), + norm_layer(min(max_features, ngf * mult * 2)), + activation] + + mult = 2 ** n_downsampling + feats_num_bottleneck = min(max_features, ngf * mult) + + dilated_block_kwargs = dict(dim=feats_num_bottleneck, padding_type=padding_type, + activation=activation, norm_layer=norm_layer) + if dilation_block_kind == 'simple': + dilated_block_kwargs['conv_kind'] = conv_kind + elif dilation_block_kind == 'multi': + dilated_block_kwargs['conv_layer'] = functools.partial( + get_conv_block_ctor('multidilated'), **multidilation_kwargs) + + # dilated blocks at the start of the bottleneck sausage + if dilated_blocks_n_start is not None and dilated_blocks_n_start > 0: + model += make_dil_blocks(dilated_blocks_n_start, dilation_block_kind, dilated_block_kwargs) + + # resnet blocks + for i in range(n_blocks): + # dilated blocks at the middle of the bottleneck sausage + if i == n_blocks // 2 and dilated_blocks_n_middle is not None and dilated_blocks_n_middle > 0: + model += make_dil_blocks(dilated_blocks_n_middle, dilation_block_kind, dilated_block_kwargs) + + if ffc_positions is not None and i in ffc_positions: + for _ in range(ffc_positions[i]): # same position can occur more than once + model += [FFCResnetBlock(feats_num_bottleneck, padding_type, norm_layer, activation_layer=nn.ReLU, + inline=True, **ffc_kwargs)] + + if is_resblock_depthwise: + resblock_groups = feats_num_bottleneck + else: + resblock_groups = 1 + + model += [ResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation=activation, + norm_layer=norm_layer, conv_kind=conv_kind, groups=resblock_groups, + dilation=dilation, second_dilation=second_dilation)] + + + # dilated blocks at the end of the bottleneck sausage + if dilated_blocks_n is not None and dilated_blocks_n > 0: + model += make_dil_blocks(dilated_blocks_n, dilation_block_kind, dilated_block_kwargs) + + # upsample + for i in range(n_downsampling): + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(min(max_features, ngf * mult), + min(max_features, int(ngf * mult / 2)), + kernel_size=3, stride=2, padding=1, output_padding=1), + up_norm_layer(min(max_features, int(ngf * mult / 2))), + up_activation] + model += [nn.ReflectionPad2d(3), + nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + if add_out_act: + model.append(get_activation('tanh' if add_out_act is True else add_out_act)) + self.model = nn.Sequential(*model) + + def forward(self, input): + return self.model(input) + + +class GlobalGeneratorGated(GlobalGenerator): + def __init__(self, *args, **kwargs): + real_kwargs=dict( + conv_kind='gated_bn_relu', + activation=nn.Identity(), + norm_layer=nn.Identity + ) + real_kwargs.update(kwargs) + super().__init__(*args, **real_kwargs) + + +class GlobalGeneratorFromSuperChannels(nn.Module): + def __init__(self, input_nc, output_nc, n_downsampling, n_blocks, super_channels, norm_layer="bn", padding_type='reflect', add_out_act=True): + super().__init__() + self.n_downsampling = n_downsampling + norm_layer = 
get_norm_layer(norm_layer) + if type(norm_layer) == functools.partial: + use_bias = (norm_layer.func == nn.InstanceNorm2d) + else: + use_bias = (norm_layer == nn.InstanceNorm2d) + + channels = self.convert_super_channels(super_channels) + self.channels = channels + + model = [nn.ReflectionPad2d(3), + nn.Conv2d(input_nc, channels[0], kernel_size=7, padding=0, bias=use_bias), + norm_layer(channels[0]), + nn.ReLU(True)] + + for i in range(n_downsampling): # add downsampling layers + mult = 2 ** i + model += [nn.Conv2d(channels[0+i], channels[1+i], kernel_size=3, stride=2, padding=1, bias=use_bias), + norm_layer(channels[1+i]), + nn.ReLU(True)] + + mult = 2 ** n_downsampling + + n_blocks1 = n_blocks // 3 + n_blocks2 = n_blocks1 + n_blocks3 = n_blocks - n_blocks1 - n_blocks2 + + for i in range(n_blocks1): + c = n_downsampling + dim = channels[c] + model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer)] + + for i in range(n_blocks2): + c = n_downsampling+1 + dim = channels[c] + kwargs = {} + if i == 0: + kwargs = {"in_dim": channels[c-1]} + model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer, **kwargs)] + + for i in range(n_blocks3): + c = n_downsampling+2 + dim = channels[c] + kwargs = {} + if i == 0: + kwargs = {"in_dim": channels[c-1]} + model += [ResnetBlock(dim, padding_type=padding_type, norm_layer=norm_layer, **kwargs)] + + for i in range(n_downsampling): # add upsampling layers + mult = 2 ** (n_downsampling - i) + model += [nn.ConvTranspose2d(channels[n_downsampling+3+i], + channels[n_downsampling+3+i+1], + kernel_size=3, stride=2, + padding=1, output_padding=1, + bias=use_bias), + norm_layer(channels[n_downsampling+3+i+1]), + nn.ReLU(True)] + model += [nn.ReflectionPad2d(3)] + model += [nn.Conv2d(channels[2*n_downsampling+3], output_nc, kernel_size=7, padding=0)] + + if add_out_act: + model.append(get_activation('tanh' if add_out_act is True else add_out_act)) + self.model = nn.Sequential(*model) + + def convert_super_channels(self, super_channels): + n_downsampling = self.n_downsampling + result = [] + cnt = 0 + + if n_downsampling == 2: + N1 = 10 + elif n_downsampling == 3: + N1 = 13 + else: + raise NotImplementedError + + for i in range(0, N1): + if i in [1,4,7,10]: + channel = super_channels[cnt] * (2 ** cnt) + config = {'channel': channel} + result.append(channel) + logging.info(f"Downsample channels {result[-1]}") + cnt += 1 + + for i in range(3): + for counter, j in enumerate(range(N1 + i * 3, N1 + 3 + i * 3)): + if len(super_channels) == 6: + channel = super_channels[3] * 4 + else: + channel = super_channels[i + 3] * 4 + config = {'channel': channel} + if counter == 0: + result.append(channel) + logging.info(f"Bottleneck channels {result[-1]}") + cnt = 2 + + for i in range(N1+9, N1+21): + if i in [22, 25,28]: + cnt -= 1 + if len(super_channels) == 6: + channel = super_channels[5 - cnt] * (2 ** cnt) + else: + channel = super_channels[7 - cnt] * (2 ** cnt) + result.append(int(channel)) + logging.info(f"Upsample channels {result[-1]}") + return result + + def forward(self, input): + return self.model(input) + + +# Defines the PatchGAN discriminator with the specified arguments. 
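+# NLayerDiscriminator follows the pix2pixHD PatchGAN layout: a stack of 4x4 convs (stride 2, then stride 1),
+# doubling channels up to a cap of 512 with LeakyReLU(0.2); forward() returns (patch predictions, intermediate
+# activations) so the trainer can compute adversarial and feature-matching losses from a single pass.
+# Illustrative shape walk-through, assuming a hypothetical 3-channel input and the defaults ndf=64, n_layers=3:
+#   Conv2d(3->64, k4 s2) -> Conv2d(64->128, k4 s2) -> Conv2d(128->256, k4 s2) -> Conv2d(256->512, k4 s1) -> Conv2d(512->1, k4 s1)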
+class NLayerDiscriminator(BaseDiscriminator): + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,): + super().__init__() + self.n_layers = n_layers + + kw = 4 + padw = int(np.ceil((kw-1.0)/2)) + sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), + nn.LeakyReLU(0.2, True)]] + + nf = ndf + for n in range(1, n_layers): + nf_prev = nf + nf = min(nf * 2, 512) + + cur_model = [] + cur_model += [ + nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw), + norm_layer(nf), + nn.LeakyReLU(0.2, True) + ] + sequence.append(cur_model) + + nf_prev = nf + nf = min(nf * 2, 512) + + cur_model = [] + cur_model += [ + nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw), + norm_layer(nf), + nn.LeakyReLU(0.2, True) + ] + sequence.append(cur_model) + + sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] + + for n in range(len(sequence)): + setattr(self, 'model'+str(n), nn.Sequential(*sequence[n])) + + def get_all_activations(self, x): + res = [x] + for n in range(self.n_layers + 2): + model = getattr(self, 'model' + str(n)) + res.append(model(res[-1])) + return res[1:] + + def forward(self, x): + act = self.get_all_activations(x) + return act[-1], act[:-1] + + +class MultidilatedNLayerDiscriminator(BaseDiscriminator): + def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, multidilation_kwargs={}): + super().__init__() + self.n_layers = n_layers + + kw = 4 + padw = int(np.ceil((kw-1.0)/2)) + sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), + nn.LeakyReLU(0.2, True)]] + + nf = ndf + for n in range(1, n_layers): + nf_prev = nf + nf = min(nf * 2, 512) + + cur_model = [] + cur_model += [ + MultidilatedConv(nf_prev, nf, kernel_size=kw, stride=2, padding=[2, 3], **multidilation_kwargs), + norm_layer(nf), + nn.LeakyReLU(0.2, True) + ] + sequence.append(cur_model) + + nf_prev = nf + nf = min(nf * 2, 512) + + cur_model = [] + cur_model += [ + nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw), + norm_layer(nf), + nn.LeakyReLU(0.2, True) + ] + sequence.append(cur_model) + + sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] + + for n in range(len(sequence)): + setattr(self, 'model'+str(n), nn.Sequential(*sequence[n])) + + def get_all_activations(self, x): + res = [x] + for n in range(self.n_layers + 2): + model = getattr(self, 'model' + str(n)) + res.append(model(res[-1])) + return res[1:] + + def forward(self, x): + act = self.get_all_activations(x) + return act[-1], act[:-1] + + +class NLayerDiscriminatorAsGen(NLayerDiscriminator): + def forward(self, x): + return super().forward(x)[0] diff --git a/lama/saicinpainting/training/modules/spatial_transform.py b/lama/saicinpainting/training/modules/spatial_transform.py new file mode 100644 index 0000000000000000000000000000000000000000..2de024ba08c549605a08b64d096f1f0db7b7722a --- /dev/null +++ b/lama/saicinpainting/training/modules/spatial_transform.py @@ -0,0 +1,49 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from kornia.geometry.transform import rotate + + +class LearnableSpatialTransformWrapper(nn.Module): + def __init__(self, impl, pad_coef=0.5, angle_init_range=80, train_angle=True): + super().__init__() + self.impl = impl + self.angle = torch.rand(1) * angle_init_range + if train_angle: + self.angle = nn.Parameter(self.angle, requires_grad=True) + self.pad_coef = pad_coef + + def forward(self, x): + if torch.is_tensor(x): + return 
self.inverse_transform(self.impl(self.transform(x)), x) + elif isinstance(x, tuple): + x_trans = tuple(self.transform(elem) for elem in x) + y_trans = self.impl(x_trans) + return tuple(self.inverse_transform(elem, orig_x) for elem, orig_x in zip(y_trans, x)) + else: + raise ValueError(f'Unexpected input type {type(x)}') + + def transform(self, x): + height, width = x.shape[2:] + pad_h, pad_w = int(height * self.pad_coef), int(width * self.pad_coef) + x_padded = F.pad(x, [pad_w, pad_w, pad_h, pad_h], mode='reflect') + x_padded_rotated = rotate(x_padded, angle=self.angle.to(x_padded)) + return x_padded_rotated + + def inverse_transform(self, y_padded_rotated, orig_x): + height, width = orig_x.shape[2:] + pad_h, pad_w = int(height * self.pad_coef), int(width * self.pad_coef) + + y_padded = rotate(y_padded_rotated, angle=-self.angle.to(y_padded_rotated)) + y_height, y_width = y_padded.shape[2:] + y = y_padded[:, :, pad_h : y_height - pad_h, pad_w : y_width - pad_w] + return y + + +if __name__ == '__main__': + layer = LearnableSpatialTransformWrapper(nn.Identity()) + x = torch.arange(2* 3 * 15 * 15).view(2, 3, 15, 15).float() + y = layer(x) + assert x.shape == y.shape + assert torch.allclose(x[:, :, 1:, 1:][:, :, :-1, :-1], y[:, :, 1:, 1:][:, :, :-1, :-1]) + print('all ok') diff --git a/lama/saicinpainting/training/modules/squeeze_excitation.py b/lama/saicinpainting/training/modules/squeeze_excitation.py new file mode 100644 index 0000000000000000000000000000000000000000..d1d902bb30c071acbc0fa919a134c80fed86bd6c --- /dev/null +++ b/lama/saicinpainting/training/modules/squeeze_excitation.py @@ -0,0 +1,20 @@ +import torch.nn as nn + + +class SELayer(nn.Module): + def __init__(self, channel, reduction=16): + super(SELayer, self).__init__() + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Sequential( + nn.Linear(channel, channel // reduction, bias=False), + nn.ReLU(inplace=True), + nn.Linear(channel // reduction, channel, bias=False), + nn.Sigmoid() + ) + + def forward(self, x): + b, c, _, _ = x.size() + y = self.avg_pool(x).view(b, c) + y = self.fc(y).view(b, c, 1, 1) + res = x * y.expand_as(x) + return res diff --git a/lama/saicinpainting/training/trainers/__init__.py b/lama/saicinpainting/training/trainers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..edcb60e796aab6291b8d066569eeadde4bf5593d --- /dev/null +++ b/lama/saicinpainting/training/trainers/__init__.py @@ -0,0 +1,30 @@ +import logging +import torch +from ....saicinpainting.training.trainers.default import DefaultInpaintingTrainingModule + + +def get_training_model_class(kind): + if kind == 'default': + return DefaultInpaintingTrainingModule + + raise ValueError(f'Unknown trainer module {kind}') + + +def make_training_model(config): + kind = config.training_model.kind + kwargs = dict(config.training_model) + kwargs.pop('kind') + kwargs['use_ddp'] = config.trainer.kwargs.get('accelerator', None) == 'ddp' + + logging.info(f'Make training model {kind}') + + cls = get_training_model_class(kind) + return cls(config, **kwargs) + + +def load_checkpoint(train_config, path, map_location='cuda', strict=True): + model: torch.nn.Module = make_training_model(train_config) + state = torch.load(path, map_location=map_location) + model.load_state_dict(state['state_dict'], strict=strict) + model.on_load_checkpoint(state) + return model diff --git a/lama/saicinpainting/training/trainers/base.py b/lama/saicinpainting/training/trainers/base.py new file mode 100644 index 
0000000000000000000000000000000000000000..17a8788cfce7c154d2470cf5f5a6efc4f1fcd083 --- /dev/null +++ b/lama/saicinpainting/training/trainers/base.py @@ -0,0 +1,291 @@ +import copy +import logging +from typing import Dict, Tuple + +import pandas as pd +import pytorch_lightning as ptl +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DistributedSampler + +from ....saicinpainting.evaluation import make_evaluator +from ....saicinpainting.training.data.datasets import make_default_train_dataloader, make_default_val_dataloader +from ....saicinpainting.training.losses.adversarial import make_discrim_loss +from ....saicinpainting.training.losses.perceptual import PerceptualLoss, ResNetPL +from ....saicinpainting.training.modules import make_generator, make_discriminator +from ....saicinpainting.training.visualizers import make_visualizer +from ....saicinpainting.utils import add_prefix_to_keys, average_dicts, set_requires_grad, flatten_dict, \ + get_has_ddp_rank + +LOGGER = logging.getLogger(__name__) + + +def make_optimizer(parameters, kind='adamw', **kwargs): + if kind == 'adam': + optimizer_class = torch.optim.Adam + elif kind == 'adamw': + optimizer_class = torch.optim.AdamW + else: + raise ValueError(f'Unknown optimizer kind {kind}') + return optimizer_class(parameters, **kwargs) + + +def update_running_average(result: nn.Module, new_iterate_model: nn.Module, decay=0.999): + with torch.no_grad(): + res_params = dict(result.named_parameters()) + new_params = dict(new_iterate_model.named_parameters()) + + for k in res_params.keys(): + res_params[k].data.mul_(decay).add_(new_params[k].data, alpha=1 - decay) + + +def make_multiscale_noise(base_tensor, scales=6, scale_mode='bilinear'): + batch_size, _, height, width = base_tensor.shape + cur_height, cur_width = height, width + result = [] + align_corners = False if scale_mode in ('bilinear', 'bicubic') else None + for _ in range(scales): + cur_sample = torch.randn(batch_size, 1, cur_height, cur_width, device=base_tensor.device) + cur_sample_scaled = F.interpolate(cur_sample, size=(height, width), mode=scale_mode, align_corners=align_corners) + result.append(cur_sample_scaled) + cur_height //= 2 + cur_width //= 2 + return torch.cat(result, dim=1) + + +class BaseInpaintingTrainingModule(ptl.LightningModule): + def __init__(self, config, use_ddp, *args, predict_only=False, visualize_each_iters=100, + average_generator=False, generator_avg_beta=0.999, average_generator_start_step=30000, + average_generator_period=10, store_discr_outputs_for_vis=False, + **kwargs): + super().__init__(*args, **kwargs) + LOGGER.info('BaseInpaintingTrainingModule init called') + + self.config = config + + self.generator = make_generator(config, **self.config.generator) + self.use_ddp = use_ddp + + if not get_has_ddp_rank(): + LOGGER.info(f'Generator\n{self.generator}') + + if not predict_only: + self.save_hyperparameters(self.config) + self.discriminator = make_discriminator(**self.config.discriminator) + self.adversarial_loss = make_discrim_loss(**self.config.losses.adversarial) + self.visualizer = make_visualizer(**self.config.visualizer) + self.val_evaluator = make_evaluator(**self.config.evaluator) + self.test_evaluator = make_evaluator(**self.config.evaluator) + + if not get_has_ddp_rank(): + LOGGER.info(f'Discriminator\n{self.discriminator}') + + extra_val = self.config.data.get('extra_val', ()) + if extra_val: + self.extra_val_titles = list(extra_val) + self.extra_evaluators = nn.ModuleDict({k: 
make_evaluator(**self.config.evaluator) + for k in extra_val}) + else: + self.extra_evaluators = {} + + self.average_generator = average_generator + self.generator_avg_beta = generator_avg_beta + self.average_generator_start_step = average_generator_start_step + self.average_generator_period = average_generator_period + self.generator_average = None + self.last_generator_averaging_step = -1 + self.store_discr_outputs_for_vis = store_discr_outputs_for_vis + + if self.config.losses.get("l1", {"weight_known": 0})['weight_known'] > 0: + self.loss_l1 = nn.L1Loss(reduction='none') + + if self.config.losses.get("mse", {"weight": 0})['weight'] > 0: + self.loss_mse = nn.MSELoss(reduction='none') + + if self.config.losses.perceptual.weight > 0: + self.loss_pl = PerceptualLoss() + + if self.config.losses.get("resnet_pl", {"weight": 0})['weight'] > 0: + self.loss_resnet_pl = ResNetPL(**self.config.losses.resnet_pl) + else: + self.loss_resnet_pl = None + + self.visualize_each_iters = visualize_each_iters + LOGGER.info('BaseInpaintingTrainingModule init done') + + def configure_optimizers(self): + discriminator_params = list(self.discriminator.parameters()) + return [ + dict(optimizer=make_optimizer(self.generator.parameters(), **self.config.optimizers.generator)), + dict(optimizer=make_optimizer(discriminator_params, **self.config.optimizers.discriminator)), + ] + + def train_dataloader(self): + kwargs = dict(self.config.data.train) + if self.use_ddp: + kwargs['ddp_kwargs'] = dict(num_replicas=self.trainer.num_nodes * self.trainer.num_processes, + rank=self.trainer.global_rank, + shuffle=True) + dataloader = make_default_train_dataloader(**self.config.data.train) + return dataloader + + def val_dataloader(self): + res = [make_default_val_dataloader(**self.config.data.val)] + + if self.config.data.visual_test is not None: + res = res + [make_default_val_dataloader(**self.config.data.visual_test)] + else: + res = res + res + + extra_val = self.config.data.get('extra_val', ()) + if extra_val: + res += [make_default_val_dataloader(**extra_val[k]) for k in self.extra_val_titles] + + return res + + def training_step(self, batch, batch_idx, optimizer_idx=None): + self._is_training_step = True + return self._do_step(batch, batch_idx, mode='train', optimizer_idx=optimizer_idx) + + def validation_step(self, batch, batch_idx, dataloader_idx): + extra_val_key = None + if dataloader_idx == 0: + mode = 'val' + elif dataloader_idx == 1: + mode = 'test' + else: + mode = 'extra_val' + extra_val_key = self.extra_val_titles[dataloader_idx - 2] + self._is_training_step = False + return self._do_step(batch, batch_idx, mode=mode, extra_val_key=extra_val_key) + + def training_step_end(self, batch_parts_outputs): + if self.training and self.average_generator \ + and self.global_step >= self.average_generator_start_step \ + and self.global_step >= self.last_generator_averaging_step + self.average_generator_period: + if self.generator_average is None: + self.generator_average = copy.deepcopy(self.generator) + else: + update_running_average(self.generator_average, self.generator, decay=self.generator_avg_beta) + self.last_generator_averaging_step = self.global_step + + full_loss = (batch_parts_outputs['loss'].mean() + if torch.is_tensor(batch_parts_outputs['loss']) # loss is not tensor when no discriminator used + else torch.tensor(batch_parts_outputs['loss']).float().requires_grad_(True)) + log_info = {k: v.mean() for k, v in batch_parts_outputs['log_info'].items()} + self.log_dict(log_info, on_step=True, on_epoch=False) + 
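+        # full_loss is what Lightning backpropagates for this step; when no discriminator is used the raw python
+        # float is wrapped into a grad-enabled tensor above, and per-key means of log_info were already logged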
return full_loss + + def validation_epoch_end(self, outputs): + outputs = [step_out for out_group in outputs for step_out in out_group] + averaged_logs = average_dicts(step_out['log_info'] for step_out in outputs) + self.log_dict({k: v.mean() for k, v in averaged_logs.items()}) + + pd.set_option('display.max_columns', 500) + pd.set_option('display.width', 1000) + + # standard validation + val_evaluator_states = [s['val_evaluator_state'] for s in outputs if 'val_evaluator_state' in s] + val_evaluator_res = self.val_evaluator.evaluation_end(states=val_evaluator_states) + val_evaluator_res_df = pd.DataFrame(val_evaluator_res).stack(1).unstack(0) + val_evaluator_res_df.dropna(axis=1, how='all', inplace=True) + LOGGER.info(f'Validation metrics after epoch #{self.current_epoch}, ' + f'total {self.global_step} iterations:\n{val_evaluator_res_df}') + + for k, v in flatten_dict(val_evaluator_res).items(): + self.log(f'val_{k}', v) + + # standard visual test + test_evaluator_states = [s['test_evaluator_state'] for s in outputs + if 'test_evaluator_state' in s] + test_evaluator_res = self.test_evaluator.evaluation_end(states=test_evaluator_states) + test_evaluator_res_df = pd.DataFrame(test_evaluator_res).stack(1).unstack(0) + test_evaluator_res_df.dropna(axis=1, how='all', inplace=True) + LOGGER.info(f'Test metrics after epoch #{self.current_epoch}, ' + f'total {self.global_step} iterations:\n{test_evaluator_res_df}') + + for k, v in flatten_dict(test_evaluator_res).items(): + self.log(f'test_{k}', v) + + # extra validations + if self.extra_evaluators: + for cur_eval_title, cur_evaluator in self.extra_evaluators.items(): + cur_state_key = f'extra_val_{cur_eval_title}_evaluator_state' + cur_states = [s[cur_state_key] for s in outputs if cur_state_key in s] + cur_evaluator_res = cur_evaluator.evaluation_end(states=cur_states) + cur_evaluator_res_df = pd.DataFrame(cur_evaluator_res).stack(1).unstack(0) + cur_evaluator_res_df.dropna(axis=1, how='all', inplace=True) + LOGGER.info(f'Extra val {cur_eval_title} metrics after epoch #{self.current_epoch}, ' + f'total {self.global_step} iterations:\n{cur_evaluator_res_df}') + for k, v in flatten_dict(cur_evaluator_res).items(): + self.log(f'extra_val_{cur_eval_title}_{k}', v) + + def _do_step(self, batch, batch_idx, mode='train', optimizer_idx=None, extra_val_key=None): + if optimizer_idx == 0: # step for generator + set_requires_grad(self.generator, True) + set_requires_grad(self.discriminator, False) + elif optimizer_idx == 1: # step for discriminator + set_requires_grad(self.generator, False) + set_requires_grad(self.discriminator, True) + + batch = self(batch) + + total_loss = 0 + metrics = {} + + if optimizer_idx is None or optimizer_idx == 0: # step for generator + total_loss, metrics = self.generator_loss(batch) + + elif optimizer_idx is None or optimizer_idx == 1: # step for discriminator + if self.config.losses.adversarial.weight > 0: + total_loss, metrics = self.discriminator_loss(batch) + + if self.get_ddp_rank() in (None, 0) and (batch_idx % self.visualize_each_iters == 0 or mode == 'test'): + if self.config.losses.adversarial.weight > 0: + if self.store_discr_outputs_for_vis: + with torch.no_grad(): + self.store_discr_outputs(batch) + vis_suffix = f'_{mode}' + if mode == 'extra_val': + vis_suffix += f'_{extra_val_key}' + self.visualizer(self.current_epoch, batch_idx, batch, suffix=vis_suffix) + + metrics_prefix = f'{mode}_' + if mode == 'extra_val': + metrics_prefix += f'{extra_val_key}_' + result = dict(loss=total_loss, 
log_info=add_prefix_to_keys(metrics, metrics_prefix)) + if mode == 'val': + result['val_evaluator_state'] = self.val_evaluator.process_batch(batch) + elif mode == 'test': + result['test_evaluator_state'] = self.test_evaluator.process_batch(batch) + elif mode == 'extra_val': + result[f'extra_val_{extra_val_key}_evaluator_state'] = self.extra_evaluators[extra_val_key].process_batch(batch) + + return result + + def get_current_generator(self, no_average=False): + if not no_average and not self.training and self.average_generator and self.generator_average is not None: + return self.generator_average + return self.generator + + def forward(self, batch: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: + """Pass data through generator and obtain at leas 'predicted_image' and 'inpainted' keys""" + raise NotImplementedError() + + def generator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: + raise NotImplementedError() + + def discriminator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: + raise NotImplementedError() + + def store_discr_outputs(self, batch): + out_size = batch['image'].shape[2:] + discr_real_out, _ = self.discriminator(batch['image']) + discr_fake_out, _ = self.discriminator(batch['predicted_image']) + batch['discr_output_real'] = F.interpolate(discr_real_out, size=out_size, mode='nearest') + batch['discr_output_fake'] = F.interpolate(discr_fake_out, size=out_size, mode='nearest') + batch['discr_output_diff'] = batch['discr_output_real'] - batch['discr_output_fake'] + + def get_ddp_rank(self): + return self.trainer.global_rank if (self.trainer.num_nodes * self.trainer.num_processes) > 1 else None diff --git a/lama/saicinpainting/training/trainers/default.py b/lama/saicinpainting/training/trainers/default.py new file mode 100644 index 0000000000000000000000000000000000000000..f03a8bcf2ae6d36fe2eb01e4673b10dcecd808ca --- /dev/null +++ b/lama/saicinpainting/training/trainers/default.py @@ -0,0 +1,175 @@ +import logging + +import torch +import torch.nn.functional as F +from omegaconf import OmegaConf + +from ....saicinpainting.training.data.datasets import make_constant_area_crop_params +from ....saicinpainting.training.losses.distance_weighting import make_mask_distance_weighter +from ....saicinpainting.training.losses.feature_matching import feature_matching_loss, masked_l1_loss +from ....saicinpainting.training.modules.fake_fakes import FakeFakesGenerator +from ....saicinpainting.training.trainers.base import BaseInpaintingTrainingModule, make_multiscale_noise +from ....saicinpainting.utils import add_prefix_to_keys, get_ramp + +LOGGER = logging.getLogger(__name__) + + +def make_constant_area_crop_batch(batch, **kwargs): + crop_y, crop_x, crop_height, crop_width = make_constant_area_crop_params(img_height=batch['image'].shape[2], + img_width=batch['image'].shape[3], + **kwargs) + batch['image'] = batch['image'][:, :, crop_y : crop_y + crop_height, crop_x : crop_x + crop_width] + batch['mask'] = batch['mask'][:, :, crop_y: crop_y + crop_height, crop_x: crop_x + crop_width] + return batch + + +class DefaultInpaintingTrainingModule(BaseInpaintingTrainingModule): + def __init__(self, *args, concat_mask=True, rescale_scheduler_kwargs=None, image_to_discriminator='predicted_image', + add_noise_kwargs=None, noise_fill_hole=False, const_area_crop_kwargs=None, + distance_weighter_kwargs=None, distance_weighted_mask_for_discr=False, + fake_fakes_proba=0, fake_fakes_generator_kwargs=None, + **kwargs): + super().__init__(*args, **kwargs) + 
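+        # concat_mask appends the binary mask as an extra input channel for the generator (see forward() below);
+        # image_to_discriminator selects which batch key ('predicted_image' by default) is scored by the
+        # discriminator and used for the reconstruction losses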
self.concat_mask = concat_mask + self.rescale_size_getter = get_ramp(**rescale_scheduler_kwargs) if rescale_scheduler_kwargs is not None else None + self.image_to_discriminator = image_to_discriminator + self.add_noise_kwargs = add_noise_kwargs + self.noise_fill_hole = noise_fill_hole + self.const_area_crop_kwargs = const_area_crop_kwargs + self.refine_mask_for_losses = make_mask_distance_weighter(**distance_weighter_kwargs) \ + if distance_weighter_kwargs is not None else None + self.distance_weighted_mask_for_discr = distance_weighted_mask_for_discr + + self.fake_fakes_proba = fake_fakes_proba + if self.fake_fakes_proba > 1e-3: + self.fake_fakes_gen = FakeFakesGenerator(**(fake_fakes_generator_kwargs or {})) + + def forward(self, batch): + if self.training and self.rescale_size_getter is not None: + cur_size = self.rescale_size_getter(self.global_step) + batch['image'] = F.interpolate(batch['image'], size=cur_size, mode='bilinear', align_corners=False) + batch['mask'] = F.interpolate(batch['mask'], size=cur_size, mode='nearest') + + if self.training and self.const_area_crop_kwargs is not None: + batch = make_constant_area_crop_batch(batch, **self.const_area_crop_kwargs) + + img = batch['image'] + mask = batch['mask'] + + masked_img = img * (1 - mask) + + if self.add_noise_kwargs is not None: + noise = make_multiscale_noise(masked_img, **self.add_noise_kwargs) + if self.noise_fill_hole: + masked_img = masked_img + mask * noise[:, :masked_img.shape[1]] + masked_img = torch.cat([masked_img, noise], dim=1) + + if self.concat_mask: + masked_img = torch.cat([masked_img, mask], dim=1) + + batch['predicted_image'] = self.generator(masked_img) + batch['inpainted'] = mask * batch['predicted_image'] + (1 - mask) * batch['image'] + + if self.fake_fakes_proba > 1e-3: + if self.training and torch.rand(1).item() < self.fake_fakes_proba: + batch['fake_fakes'], batch['fake_fakes_masks'] = self.fake_fakes_gen(img, mask) + batch['use_fake_fakes'] = True + else: + batch['fake_fakes'] = torch.zeros_like(img) + batch['fake_fakes_masks'] = torch.zeros_like(mask) + batch['use_fake_fakes'] = False + + batch['mask_for_losses'] = self.refine_mask_for_losses(img, batch['predicted_image'], mask) \ + if self.refine_mask_for_losses is not None and self.training \ + else mask + + return batch + + def generator_loss(self, batch): + img = batch['image'] + predicted_img = batch[self.image_to_discriminator] + original_mask = batch['mask'] + supervised_mask = batch['mask_for_losses'] + + # L1 + l1_value = masked_l1_loss(predicted_img, img, supervised_mask, + self.config.losses.l1.weight_known, + self.config.losses.l1.weight_missing) + + total_loss = l1_value + metrics = dict(gen_l1=l1_value) + + # vgg-based perceptual loss + if self.config.losses.perceptual.weight > 0: + pl_value = self.loss_pl(predicted_img, img, mask=supervised_mask).sum() * self.config.losses.perceptual.weight + total_loss = total_loss + pl_value + metrics['gen_pl'] = pl_value + + # discriminator + # adversarial_loss calls backward by itself + mask_for_discr = supervised_mask if self.distance_weighted_mask_for_discr else original_mask + self.adversarial_loss.pre_generator_step(real_batch=img, fake_batch=predicted_img, + generator=self.generator, discriminator=self.discriminator) + discr_real_pred, discr_real_features = self.discriminator(img) + discr_fake_pred, discr_fake_features = self.discriminator(predicted_img) + adv_gen_loss, adv_metrics = self.adversarial_loss.generator_loss(real_batch=img, + fake_batch=predicted_img, + 
discr_real_pred=discr_real_pred, + discr_fake_pred=discr_fake_pred, + mask=mask_for_discr) + total_loss = total_loss + adv_gen_loss + metrics['gen_adv'] = adv_gen_loss + metrics.update(add_prefix_to_keys(adv_metrics, 'adv_')) + + # feature matching + if self.config.losses.feature_matching.weight > 0: + need_mask_in_fm = OmegaConf.to_container(self.config.losses.feature_matching).get('pass_mask', False) + mask_for_fm = supervised_mask if need_mask_in_fm else None + fm_value = feature_matching_loss(discr_fake_features, discr_real_features, + mask=mask_for_fm) * self.config.losses.feature_matching.weight + total_loss = total_loss + fm_value + metrics['gen_fm'] = fm_value + + if self.loss_resnet_pl is not None: + resnet_pl_value = self.loss_resnet_pl(predicted_img, img) + total_loss = total_loss + resnet_pl_value + metrics['gen_resnet_pl'] = resnet_pl_value + + return total_loss, metrics + + def discriminator_loss(self, batch): + total_loss = 0 + metrics = {} + + predicted_img = batch[self.image_to_discriminator].detach() + self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=predicted_img, + generator=self.generator, discriminator=self.discriminator) + discr_real_pred, discr_real_features = self.discriminator(batch['image']) + discr_fake_pred, discr_fake_features = self.discriminator(predicted_img) + adv_discr_loss, adv_metrics = self.adversarial_loss.discriminator_loss(real_batch=batch['image'], + fake_batch=predicted_img, + discr_real_pred=discr_real_pred, + discr_fake_pred=discr_fake_pred, + mask=batch['mask']) + total_loss = total_loss + adv_discr_loss + metrics['discr_adv'] = adv_discr_loss + metrics.update(add_prefix_to_keys(adv_metrics, 'adv_')) + + + if batch.get('use_fake_fakes', False): + fake_fakes = batch['fake_fakes'] + self.adversarial_loss.pre_discriminator_step(real_batch=batch['image'], fake_batch=fake_fakes, + generator=self.generator, discriminator=self.discriminator) + discr_fake_fakes_pred, _ = self.discriminator(fake_fakes) + fake_fakes_adv_discr_loss, fake_fakes_adv_metrics = self.adversarial_loss.discriminator_loss( + real_batch=batch['image'], + fake_batch=fake_fakes, + discr_real_pred=discr_real_pred, + discr_fake_pred=discr_fake_fakes_pred, + mask=batch['mask'] + ) + total_loss = total_loss + fake_fakes_adv_discr_loss + metrics['discr_adv_fake_fakes'] = fake_fakes_adv_discr_loss + metrics.update(add_prefix_to_keys(fake_fakes_adv_metrics, 'adv_')) + + return total_loss, metrics diff --git a/lama/saicinpainting/training/visualizers/__init__.py b/lama/saicinpainting/training/visualizers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..76677f4606e1c8221e6432fa547f3c72195b2992 --- /dev/null +++ b/lama/saicinpainting/training/visualizers/__init__.py @@ -0,0 +1,15 @@ +import logging + +from ....saicinpainting.training.visualizers.directory import DirectoryVisualizer +from ....saicinpainting.training.visualizers.noop import NoopVisualizer + + +def make_visualizer(kind, **kwargs): + logging.info(f'Make visualizer {kind}') + + if kind == 'directory': + return DirectoryVisualizer(**kwargs) + if kind == 'noop': + return NoopVisualizer() + + raise ValueError(f'Unknown visualizer kind {kind}') diff --git a/lama/saicinpainting/training/visualizers/base.py b/lama/saicinpainting/training/visualizers/base.py new file mode 100644 index 0000000000000000000000000000000000000000..675f01682ddf5e31b6cc341735378c6f3b242e49 --- /dev/null +++ b/lama/saicinpainting/training/visualizers/base.py @@ -0,0 +1,73 @@ +import abc +from 
typing import Dict, List + +import numpy as np +import torch +from skimage import color +from skimage.segmentation import mark_boundaries + +from . import colors + +COLORS, _ = colors.generate_colors(151) # 151 - max classes for semantic segmentation + + +class BaseVisualizer: + @abc.abstractmethod + def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None): + """ + Take a batch, make an image from it and visualize + """ + raise NotImplementedError() + + +def visualize_mask_and_images(images_dict: Dict[str, np.ndarray], keys: List[str], + last_without_mask=True, rescale_keys=None, mask_only_first=None, + black_mask=False) -> np.ndarray: + mask = images_dict['mask'] > 0.5 + result = [] + for i, k in enumerate(keys): + img = images_dict[k] + img = np.transpose(img, (1, 2, 0)) + + if rescale_keys is not None and k in rescale_keys: + img = img - img.min() + img /= img.max() + 1e-5 + if len(img.shape) == 2: + img = np.expand_dims(img, 2) + + if img.shape[2] == 1: + img = np.repeat(img, 3, axis=2) + elif (img.shape[2] > 3): + img_classes = img.argmax(2) + img = color.label2rgb(img_classes, colors=COLORS) + + if mask_only_first: + need_mark_boundaries = i == 0 + else: + need_mark_boundaries = i < len(keys) - 1 or not last_without_mask + + if need_mark_boundaries: + if black_mask: + img = img * (1 - mask[0][..., None]) + img = mark_boundaries(img, + mask[0], + color=(1., 0., 0.), + outline_color=(1., 1., 1.), + mode='thick') + result.append(img) + return np.concatenate(result, axis=1) + + +def visualize_mask_and_images_batch(batch: Dict[str, torch.Tensor], keys: List[str], max_items=10, + last_without_mask=True, rescale_keys=None) -> np.ndarray: + batch = {k: tens.detach().cpu().numpy() for k, tens in batch.items() + if k in keys or k == 'mask'} + + batch_size = next(iter(batch.values())).shape[0] + items_to_vis = min(batch_size, max_items) + result = [] + for i in range(items_to_vis): + cur_dct = {k: tens[i] for k, tens in batch.items()} + result.append(visualize_mask_and_images(cur_dct, keys, last_without_mask=last_without_mask, + rescale_keys=rescale_keys)) + return np.concatenate(result, axis=0) diff --git a/lama/saicinpainting/training/visualizers/colors.py b/lama/saicinpainting/training/visualizers/colors.py new file mode 100644 index 0000000000000000000000000000000000000000..9e9e39182c58cb06a1c5e97a7e6c497cc3388ebe --- /dev/null +++ b/lama/saicinpainting/training/visualizers/colors.py @@ -0,0 +1,76 @@ +import random +import colorsys + +import numpy as np +import matplotlib +matplotlib.use('agg') +import matplotlib.pyplot as plt +from matplotlib.colors import LinearSegmentedColormap + + +def generate_colors(nlabels, type='bright', first_color_black=False, last_color_black=True, verbose=False): + # https://stackoverflow.com/questions/14720331/how-to-generate-random-colors-in-matplotlib + """ + Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks + :param nlabels: Number of labels (size of colormap) + :param type: 'bright' for strong colors, 'soft' for pastel colors + :param first_color_black: Option to use first color as black, True or False + :param last_color_black: Option to use last color as black, True or False + :param verbose: Prints the number of labels and shows the colormap. 
True or False + :return: colormap for matplotlib + """ + if type not in ('bright', 'soft'): + print ('Please choose "bright" or "soft" for type') + return + + if verbose: + print('Number of labels: ' + str(nlabels)) + + # Generate color map for bright colors, based on hsv + if type == 'bright': + randHSVcolors = [(np.random.uniform(low=0.0, high=1), + np.random.uniform(low=0.2, high=1), + np.random.uniform(low=0.9, high=1)) for i in range(nlabels)] + + # Convert HSV list to RGB + randRGBcolors = [] + for HSVcolor in randHSVcolors: + randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2])) + + if first_color_black: + randRGBcolors[0] = [0, 0, 0] + + if last_color_black: + randRGBcolors[-1] = [0, 0, 0] + + random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels) + + # Generate soft pastel colors, by limiting the RGB spectrum + if type == 'soft': + low = 0.6 + high = 0.95 + randRGBcolors = [(np.random.uniform(low=low, high=high), + np.random.uniform(low=low, high=high), + np.random.uniform(low=low, high=high)) for i in range(nlabels)] + + if first_color_black: + randRGBcolors[0] = [0, 0, 0] + + if last_color_black: + randRGBcolors[-1] = [0, 0, 0] + random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels) + + # Display colorbar + if verbose: + from matplotlib import colors, colorbar + from matplotlib import pyplot as plt + fig, ax = plt.subplots(1, 1, figsize=(15, 0.5)) + + bounds = np.linspace(0, nlabels, nlabels + 1) + norm = colors.BoundaryNorm(bounds, nlabels) + + cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None, + boundaries=bounds, format='%1i', orientation=u'horizontal') + + return randRGBcolors, random_colormap + diff --git a/lama/saicinpainting/training/visualizers/directory.py b/lama/saicinpainting/training/visualizers/directory.py new file mode 100644 index 0000000000000000000000000000000000000000..49142b67a9a3b3bd3824e759d786957f682b455b --- /dev/null +++ b/lama/saicinpainting/training/visualizers/directory.py @@ -0,0 +1,36 @@ +import os + +import cv2 +import numpy as np + +from ....saicinpainting.training.visualizers.base import BaseVisualizer, visualize_mask_and_images_batch +from ....saicinpainting.utils import check_and_warn_input_range + + +class DirectoryVisualizer(BaseVisualizer): + DEFAULT_KEY_ORDER = 'image predicted_image inpainted'.split(' ') + + def __init__(self, outdir, key_order=DEFAULT_KEY_ORDER, max_items_in_batch=10, + last_without_mask=True, rescale_keys=None): + self.outdir = outdir + os.makedirs(self.outdir, exist_ok=True) + self.key_order = key_order + self.max_items_in_batch = max_items_in_batch + self.last_without_mask = last_without_mask + self.rescale_keys = rescale_keys + + def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None): + check_and_warn_input_range(batch['image'], 0, 1, 'DirectoryVisualizer target image') + vis_img = visualize_mask_and_images_batch(batch, self.key_order, max_items=self.max_items_in_batch, + last_without_mask=self.last_without_mask, + rescale_keys=self.rescale_keys) + + vis_img = np.clip(vis_img * 255, 0, 255).astype('uint8') + + curoutdir = os.path.join(self.outdir, f'epoch{epoch_i:04d}{suffix}') + os.makedirs(curoutdir, exist_ok=True) + rank_suffix = f'_r{rank}' if rank is not None else '' + out_fname = os.path.join(curoutdir, f'batch{batch_i:07d}{rank_suffix}.jpg') + + vis_img = cv2.cvtColor(vis_img, cv2.COLOR_RGB2BGR) + cv2.imwrite(out_fname, vis_img) diff --git 
a/lama/saicinpainting/training/visualizers/noop.py b/lama/saicinpainting/training/visualizers/noop.py new file mode 100644 index 0000000000000000000000000000000000000000..6cc9f5fc7dc303de9bdeccef98af8de1a2af55d4 --- /dev/null +++ b/lama/saicinpainting/training/visualizers/noop.py @@ -0,0 +1,9 @@ +from ....saicinpainting.training.visualizers.base import BaseVisualizer + + +class NoopVisualizer(BaseVisualizer): + def __init__(self, *args, **kwargs): + pass + + def __call__(self, epoch_i, batch_i, batch, suffix='', rank=None): + pass diff --git a/lama/saicinpainting/utils.py b/lama/saicinpainting/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c2d67ed8bc793dd5113224fa322adb88f3ed9b22 --- /dev/null +++ b/lama/saicinpainting/utils.py @@ -0,0 +1,177 @@ +import bisect +import functools +import logging +import numbers +import os +import signal +import sys +import traceback +import warnings + +import torch +from pytorch_lightning import seed_everything + +LOGGER = logging.getLogger(__name__) + +import platform +if platform.system() != 'Linux': + signal.SIGUSR1 = 1 + +def check_and_warn_input_range(tensor, min_value, max_value, name): + actual_min = tensor.min() + actual_max = tensor.max() + if actual_min < min_value or actual_max > max_value: + warnings.warn(f"{name} must be in {min_value}..{max_value} range, but it ranges {actual_min}..{actual_max}") + + +def sum_dict_with_prefix(target, cur_dict, prefix, default=0): + for k, v in cur_dict.items(): + target_key = prefix + k + target[target_key] = target.get(target_key, default) + v + + +def average_dicts(dict_list): + result = {} + norm = 1e-3 + for dct in dict_list: + sum_dict_with_prefix(result, dct, '') + norm += 1 + for k in list(result): + result[k] /= norm + return result + + +def add_prefix_to_keys(dct, prefix): + return {prefix + k: v for k, v in dct.items()} + + +def set_requires_grad(module, value): + for param in module.parameters(): + param.requires_grad = value + + +def flatten_dict(dct): + result = {} + for k, v in dct.items(): + if isinstance(k, tuple): + k = '_'.join(k) + if isinstance(v, dict): + for sub_k, sub_v in flatten_dict(v).items(): + result[f'{k}_{sub_k}'] = sub_v + else: + result[k] = v + return result + + +class LinearRamp: + def __init__(self, start_value=0, end_value=1, start_iter=-1, end_iter=0): + self.start_value = start_value + self.end_value = end_value + self.start_iter = start_iter + self.end_iter = end_iter + + def __call__(self, i): + if i < self.start_iter: + return self.start_value + if i >= self.end_iter: + return self.end_value + part = (i - self.start_iter) / (self.end_iter - self.start_iter) + return self.start_value * (1 - part) + self.end_value * part + + +class LadderRamp: + def __init__(self, start_iters, values): + self.start_iters = start_iters + self.values = values + assert len(values) == len(start_iters) + 1, (len(values), len(start_iters)) + + def __call__(self, i): + segment_i = bisect.bisect_right(self.start_iters, i) + return self.values[segment_i] + + +def get_ramp(kind='ladder', **kwargs): + if kind == 'linear': + return LinearRamp(**kwargs) + if kind == 'ladder': + return LadderRamp(**kwargs) + raise ValueError(f'Unexpected ramp kind: {kind}') + + +def print_traceback_handler(sig, frame): + LOGGER.warning(f'Received signal {sig}') + bt = ''.join(traceback.format_stack()) + LOGGER.warning(f'Requested stack trace:\n{bt}') + + +def register_debug_signal_handlers(sig=signal.SIGUSR1, handler=print_traceback_handler): + LOGGER.warning(f'Setting signal {sig} 
handler {handler}') + signal.signal(sig, handler) + + +def handle_deterministic_config(config): + seed = dict(config).get('seed', None) + if seed is None: + return False + + seed_everything(seed) + return True + + +def get_shape(t): + if torch.is_tensor(t): + return tuple(t.shape) + elif isinstance(t, dict): + return {n: get_shape(q) for n, q in t.items()} + elif isinstance(t, (list, tuple)): + return [get_shape(q) for q in t] + elif isinstance(t, numbers.Number): + return type(t) + else: + raise ValueError('unexpected type {}'.format(type(t))) + + +def get_has_ddp_rank(): + master_port = os.environ.get('MASTER_PORT', None) + node_rank = os.environ.get('NODE_RANK', None) + local_rank = os.environ.get('LOCAL_RANK', None) + world_size = os.environ.get('WORLD_SIZE', None) + has_rank = master_port is not None or node_rank is not None or local_rank is not None or world_size is not None + return has_rank + + +def handle_ddp_subprocess(): + def main_decorator(main_func): + @functools.wraps(main_func) + def new_main(*args, **kwargs): + # Trainer sets MASTER_PORT, NODE_RANK, LOCAL_RANK, WORLD_SIZE + parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None) + has_parent = parent_cwd is not None + has_rank = get_has_ddp_rank() + assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}' + + if has_parent: + # we are in the worker + sys.argv.extend([ + f'hydra.run.dir={parent_cwd}', + # 'hydra/hydra_logging=disabled', + # 'hydra/job_logging=disabled' + ]) + # do nothing if this is a top-level process + # TRAINING_PARENT_WORK_DIR is set in handle_ddp_parent_process after hydra initialization + + main_func(*args, **kwargs) + return new_main + return main_decorator + + +def handle_ddp_parent_process(): + parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None) + has_parent = parent_cwd is not None + has_rank = get_has_ddp_rank() + assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}' + + if parent_cwd is None: + os.environ['TRAINING_PARENT_WORK_DIR'] = os.getcwd() + + return has_parent diff --git a/package.txt b/package.txt new file mode 100644 index 0000000000000000000000000000000000000000..8504f5c96cd44f720ac45715c09352e5804dac10 --- /dev/null +++ b/package.txt @@ -0,0 +1 @@ +python3-dev \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..11d1f50eae65e07255d2693186fd3b310c732a76 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,8 @@ +[build-system] +requires = ["flit_core >=3.2,<4"] +build-backend = "flit_core.buildapi" + +[project] +name = "PyTorch-SVGRender" +authors = [{name = "Ximing Xing", email = "ximingxing@gmail.com"}] +dynamic = ["version", "description"] diff --git a/pytorch_svgrender/__init__.py b/pytorch_svgrender/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..40d49bb84dfa3d10f1b2d8c751cb93082a3878bc --- /dev/null +++ b/pytorch_svgrender/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Copyright (c) 2023, XiMing Xing. +# License: MPL-2.0 License + +__version__ = "1.0" diff --git a/pytorch_svgrender/diffusers_warp/__init__.py b/pytorch_svgrender/diffusers_warp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9657b1234ca9fceb808eb9deda0e696c9b5fbbc2 --- /dev/null +++ b/pytorch_svgrender/diffusers_warp/__init__.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: +from typing import AnyStr +import pathlib +from collections import OrderedDict +from packaging import version + +import torch +from diffusers import StableDiffusionPipeline, SchedulerMixin +from diffusers import UNet2DConditionModel +from diffusers.utils import is_torch_version, is_xformers_available + +DiffusersModels = OrderedDict({ + "sd14": "CompVis/stable-diffusion-v1-4", # resolution: 512 + "sd15": "runwayml/stable-diffusion-v1-5", # resolution: 512 + "sd21b": "stabilityai/stable-diffusion-2-1-base", # resolution: 512 + "sd21": "stabilityai/stable-diffusion-2-1", # resolution: 768 + "sdxl": "stabilityai/stable-diffusion-xl-base-1.0", # resolution: 1024 +}) + +# default resolution +_model2resolution = { + "sd14": 512, + "sd15": 512, + "sd21b": 512, + "sd21": 768, + "sdxl": 1024, +} + + +def model2res(model_id: str): + return _model2resolution.get(model_id, 512) + + +def init_StableDiffusion_pipeline(model_id: AnyStr, + custom_pipeline: StableDiffusionPipeline, + custom_scheduler: SchedulerMixin = None, + device: torch.device = "cuda", + torch_dtype: torch.dtype = torch.float32, + local_files_only: bool = True, + force_download: bool = False, + resume_download: bool = False, + ldm_speed_up: bool = False, + enable_xformers: bool = True, + gradient_checkpoint: bool = False, + cpu_offload: bool = False, + vae_slicing: bool = False, + lora_path: AnyStr = None, + unet_path: AnyStr = None) -> StableDiffusionPipeline: + """ + A tool for initial diffusers pipeline. + + Args: + model_id (`str` or `os.PathLike`, *optional*): pretrained_model_name_or_path + custom_pipeline: any StableDiffusionPipeline pipeline + custom_scheduler: any scheduler + device: set device + torch_dtype: data type + local_files_only: prohibited download model + force_download: forced download model + resume_download: re-download model + ldm_speed_up: use the `torch.compile` api to speed up unet + enable_xformers: enable memory efficient attention from [xFormers] + gradient_checkpoint: activates gradient checkpointing for the current model + cpu_offload: enable sequential cpu offload + vae_slicing: enable sliced VAE decoding + lora_path: load LoRA checkpoint + unet_path: load unet checkpoint + + Returns: + diffusers.StableDiffusionPipeline + """ + + # get model id + model_id = DiffusersModels.get(model_id, model_id) + + # process diffusion model + if custom_scheduler is not None: + pipeline = custom_pipeline.from_pretrained( + model_id, + torch_dtype=torch_dtype, + local_files_only=local_files_only, + force_download=force_download, + resume_download=resume_download, + scheduler=custom_scheduler.from_pretrained(model_id, + subfolder="scheduler", + local_files_only=local_files_only, + force_download=force_download, + resume_download=resume_download) + ).to(device) + else: + pipeline = custom_pipeline.from_pretrained( + model_id, + torch_dtype=torch_dtype, + local_files_only=local_files_only, + force_download=force_download, + resume_download=resume_download, + ).to(device) + + print(f"load diffusers pipeline: {model_id}") + + # process unet model if exist + if unet_path is not None and pathlib.Path(unet_path).exists(): + print(f"=> load u-net from {unet_path}") + pipeline.unet.from_pretrained(model_id, subfolder="unet") + + # process lora layers if exist + if lora_path is not None and pathlib.Path(lora_path).exists(): + pipeline.unet.load_attn_procs(lora_path) + print(f"=> load lora layers into U-Net from {lora_path} ...") + + # torch.compile + if ldm_speed_up: + if 
is_torch_version(">=", "2.0.0"): + pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True) + print(f"=> enable torch.compile on U-Net") + else: + print(f"=> warning: calling torch.compile speed-up failed, since torch version <= 2.0.0") + + # Meta xformers + if enable_xformers: + if is_xformers_available(): + import xformers + + xformers_version = version.parse(xformers.__version__) + if xformers_version == version.parse("0.0.16"): + print( + "xFormers 0.0.16 cannot be used for training in some GPUs. " + "If you observe problems during training, please update xFormers to at least 0.0.17. " + "See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." + ) + print(f"=> enable xformers") + pipeline.unet.enable_xformers_memory_efficient_attention() + else: + print(f"=> warning: xformers is not available.") + + # gradient checkpointing + if gradient_checkpoint: + # if pipeline.unet.is_gradient_checkpointing: + if True: + print(f"=> enable gradient checkpointing") + pipeline.unet.enable_gradient_checkpointing() + else: + print("=> waring: gradient checkpointing is not activated for this model.") + + if cpu_offload: + pipeline.enable_sequential_cpu_offload() + + if vae_slicing: + pipeline.enable_vae_slicing() + + print(pipeline.scheduler) + return pipeline + + +def init_diffusers_unet(model_id: AnyStr, + device: torch.device = "cuda", + torch_dtype: torch.dtype = torch.float32, + local_files_only: bool = True, + force_download: bool = False, + resume_download: bool = False, + ldm_speed_up: bool = False, + enable_xformers: bool = True, + gradient_checkpoint: bool = False, + lora_path: AnyStr = None, + unet_path: AnyStr = None): + """ + A tool for initial diffusers UNet model. + + Args: + model_id (`str` or `os.PathLike`, *optional*): pretrained_model_name_or_path + device: set device + torch_dtype: data type + local_files_only: prohibited download model + force_download: forced download model + resume_download: re-download model + ldm_speed_up: use the `torch.compile` api to speed up unet + enable_xformers: enable memory efficient attention from [xFormers] + gradient_checkpoint: activates gradient checkpointing for the current model + lora_path: load LoRA checkpoint + unet_path: load unet checkpoint + + Returns: + diffusers.UNet + """ + + # get model id + model_id = DiffusersModels.get(model_id, model_id) + + # process UNet model + unet = UNet2DConditionModel.from_pretrained( + model_id, + subfolder="unet", + torch_dtype=torch_dtype, + local_files_only=local_files_only, + force_download=force_download, + resume_download=resume_download, + ).to(device) + + print(f"load diffusers UNet: {model_id}") + + # process unet model if exist + if unet_path is not None and pathlib.Path(unet_path).exists(): + print(f"=> load u-net from {unet_path}") + unet.from_pretrained(model_id) + + # process lora layers if exist + if lora_path is not None and pathlib.Path(lora_path).exists(): + unet.load_attn_procs(lora_path) + print(f"=> load lora layers into U-Net from {lora_path} ...") + + # torch.compile + if ldm_speed_up: + if is_torch_version(">=", "2.0.0"): + unet = torch.compile(unet, mode="reduce-overhead", fullgraph=True) + print(f"=> enable torch.compile on U-Net") + else: + print(f"=> warning: calling torch.compile speed-up failed, since torch version <= 2.0.0") + + # Meta xformers + if enable_xformers: + if is_xformers_available(): + import xformers + + xformers_version = version.parse(xformers.__version__) + if xformers_version == 
version.parse("0.0.16"): + print( + "xFormers 0.0.16 cannot be used for training in some GPUs. " + "If you observe problems during training, please update xFormers to at least 0.0.17. " + "See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." + ) + print(f"=> enable xformers") + unet.enable_xformers_memory_efficient_attention() + else: + print(f"=> warning: xformers is not available.") + + # gradient checkpointing + if gradient_checkpoint: + # if unet.is_gradient_checkpointing: + if True: + print(f"=> enable gradient checkpointing") + unet.enable_gradient_checkpointing() + else: + print("=> waring: gradient checkpointing is not activated for this model.") + + return unet diff --git a/pytorch_svgrender/diffvg_warp/__init__.py b/pytorch_svgrender/diffvg_warp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a97c9500f48e17361ce5ba881bc76d2322f5d7ce --- /dev/null +++ b/pytorch_svgrender/diffvg_warp/__init__.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .diffvg_state import DiffVGState, init_pydiffvg + +__all__ = [ + 'DiffVGState', + 'init_pydiffvg' +] diff --git a/pytorch_svgrender/diffvg_warp/diffvg_state.py b/pytorch_svgrender/diffvg_warp/diffvg_state.py new file mode 100644 index 0000000000000000000000000000000000000000..e2a2e3636136a7f13cf88f34ce1947f681de332a --- /dev/null +++ b/pytorch_svgrender/diffvg_warp/diffvg_state.py @@ -0,0 +1,282 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: parent class +# Copyright (c) 2023, XiMing Xing. +# License: MIT License +import pathlib +from typing import AnyStr, List, Union +import xml.etree.ElementTree as etree + +import torch +import pydiffvg + + +def init_pydiffvg(device: torch.device, + use_gpu: bool = torch.cuda.is_available(), + print_timing: bool = False): + pydiffvg.set_use_gpu(use_gpu) + pydiffvg.set_device(device) + pydiffvg.set_print_timing(print_timing) + + +class DiffVGState(torch.nn.Module): + + def __init__(self, + device: torch.device, + use_gpu: bool = torch.cuda.is_available(), + print_timing: bool = False, + canvas_width: int = None, + canvas_height: int = None): + super(DiffVGState, self).__init__() + # pydiffvg device setting + self.device = device + init_pydiffvg(device, use_gpu, print_timing) + + # canvas size + self.canvas_width = canvas_width + self.canvas_height = canvas_height + + # record all paths + self.shapes = [] + self.shape_groups = [] + # record the current optimized path + self.cur_shapes = [] + self.cur_shape_groups = [] + + # learnable SVG params + self.point_vars = [] + self.color_vars = [] + self.width_vars = [] + + def clip_curve_shape(self, *args, **kwargs): + raise NotImplementedError + + def render_warp(self, seed=0): + self.clip_curve_shape() + + scene_args = pydiffvg.RenderFunction.serialize_scene( + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups + ) + _render = pydiffvg.RenderFunction.apply + img = _render(self.canvas_width, # width + self.canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + seed, # seed + None, + *scene_args) + return img + + @staticmethod + def load_svg(path_svg): + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(path_svg) + return canvas_width, canvas_height, shapes, shape_groups + + def save_svg(self, + filename: Union[AnyStr, pathlib.Path], + width: int = None, + height: int = None, + shapes: List = None, + shape_groups: List = None, + 
use_gamma: bool = False, + background: str = None): + """ + Save an SVG file with specified parameters and shapes. + Noting: New version of SVG saving function that is an adaptation of pydiffvg.save_svg. + The original version saved words resulting in incomplete glyphs. + + Args: + filename (str): The path to save the SVG file. + width (int): The width of the SVG canvas. + height (int): The height of the SVG canvas. + shapes (list): A list of shapes to be included in the SVG. + shape_groups (list): A list of shape groups. + use_gamma (bool): Flag indicating whether to apply gamma correction. + background (str, optional): The background color of the SVG. + + Returns: + None + """ + root = etree.Element('svg') + root.set('version', '1.1') + root.set('xmlns', 'http://www.w3.org/2000/svg') + root.set('width', str(width)) + root.set('height', str(height)) + + if background is not None: + print(f"setting background to {background}") + root.set('style', str(background)) + + defs = etree.SubElement(root, 'defs') + g = etree.SubElement(root, 'g') + + if use_gamma: + f = etree.SubElement(defs, 'filter') + f.set('id', 'gamma') + f.set('x', '0') + f.set('y', '0') + f.set('width', '100%') + f.set('height', '100%') + gamma = etree.SubElement(f, 'feComponentTransfer') + gamma.set('color-interpolation-filters', 'sRGB') + feFuncR = etree.SubElement(gamma, 'feFuncR') + feFuncR.set('type', 'gamma') + feFuncR.set('amplitude', str(1)) + feFuncR.set('exponent', str(1 / 2.2)) + feFuncG = etree.SubElement(gamma, 'feFuncG') + feFuncG.set('type', 'gamma') + feFuncG.set('amplitude', str(1)) + feFuncG.set('exponent', str(1 / 2.2)) + feFuncB = etree.SubElement(gamma, 'feFuncB') + feFuncB.set('type', 'gamma') + feFuncB.set('amplitude', str(1)) + feFuncB.set('exponent', str(1 / 2.2)) + feFuncA = etree.SubElement(gamma, 'feFuncA') + feFuncA.set('type', 'gamma') + feFuncA.set('amplitude', str(1)) + feFuncA.set('exponent', str(1 / 2.2)) + g.set('style', 'filter:url(#gamma)') + + # Store color + for i, shape_group in enumerate(shape_groups): + def add_color(shape_color, name): + if isinstance(shape_color, pydiffvg.LinearGradient): + lg = shape_color + color = etree.SubElement(defs, 'linearGradient') + color.set('id', name) + color.set('x1', str(lg.begin[0].item())) + color.set('y1', str(lg.begin[1].item())) + color.set('x2', str(lg.end[0].item())) + color.set('y2', str(lg.end[1].item())) + offsets = lg.offsets.data.cpu().numpy() + stop_colors = lg.stop_colors.data.cpu().numpy() + for j in range(offsets.shape[0]): + stop = etree.SubElement(color, 'stop') + stop.set('offset', str(offsets[j])) + c = lg.stop_colors[j, :] + stop.set('stop-color', 'rgb({}, {}, {})'.format( + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]) + )) + stop.set('stop-opacity', '{}'.format(c[3])) + if isinstance(shape_color, pydiffvg.RadialGradient): + lg = shape_color + color = etree.SubElement(defs, 'radialGradient') + color.set('id', name) + color.set('cx', str(lg.center[0].item() / width)) + color.set('cy', str(lg.center[1].item() / height)) + # this only support width=height + color.set('r', str(lg.radius[0].item() / width)) + offsets = lg.offsets.data.cpu().numpy() + stop_colors = lg.stop_colors.data.cpu().numpy() + for j in range(offsets.shape[0]): + stop = etree.SubElement(color, 'stop') + stop.set('offset', str(offsets[j])) + c = lg.stop_colors[j, :] + stop.set('stop-color', 'rgb({}, {}, {})'.format( + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]) + )) + stop.set('stop-opacity', '{}'.format(c[3])) + + if shape_group.fill_color is not None: 
+ add_color(shape_group.fill_color, 'shape_{}_fill'.format(i)) + if shape_group.stroke_color is not None: + add_color(shape_group.stroke_color, 'shape_{}_stroke'.format(i)) + + for i, shape_group in enumerate(shape_groups): + shape = shapes[shape_group.shape_ids[0]] + if isinstance(shape, pydiffvg.Circle): + shape_node = etree.SubElement(g, 'circle') + shape_node.set('r', str(shape.radius.item())) + shape_node.set('cx', str(shape.center[0].item())) + shape_node.set('cy', str(shape.center[1].item())) + elif isinstance(shape, pydiffvg.Polygon): + shape_node = etree.SubElement(g, 'polygon') + points = shape.points.data.cpu().numpy() + path_str = '' + for j in range(0, shape.points.shape[0]): + path_str += '{} {}'.format(points[j, 0], points[j, 1]) + if j != shape.points.shape[0] - 1: + path_str += ' ' + shape_node.set('points', path_str) + elif isinstance(shape, pydiffvg.Path): + for j, id in enumerate(shape_group.shape_ids): + shape = shapes[id] + if isinstance(shape, pydiffvg.Path): + if j == 0: + shape_node = etree.SubElement(g, 'path') + node_id = shape_node.get('id') + path_str = '' + + num_segments = shape.num_control_points.shape[0] + num_control_points = shape.num_control_points.data.cpu().numpy() + points = shape.points.data.cpu().numpy() + num_points = shape.points.shape[0] + path_str += 'M {} {}'.format(points[0, 0], points[0, 1]) + point_id = 1 + for j in range(0, num_segments): + if num_control_points[j] == 0: + p = point_id % num_points + path_str += ' L {} {}'.format( + points[p, 0], points[p, 1]) + point_id += 1 + elif num_control_points[j] == 1: + p1 = (point_id + 1) % num_points + path_str += ' Q {} {} {} {}'.format( + points[point_id, 0], points[point_id, 1], + points[p1, 0], points[p1, 1]) + point_id += 2 + elif num_control_points[j] == 2: + p2 = (point_id + 2) % num_points + path_str += ' C {} {} {} {} {} {}'.format( + points[point_id, 0], points[point_id, 1], + points[point_id + 1, 0], points[point_id + 1, 1], + points[p2, 0], points[p2, 1]) + point_id += 3 + if node_id is not None: + shape_node.set('id', node_id) # add id to Path + shape_node.set('d', path_str) + elif isinstance(shape, pydiffvg.Rect): + shape_node = etree.SubElement(g, 'rect') + shape_node.set('x', str(shape.p_min[0].item())) + shape_node.set('y', str(shape.p_min[1].item())) + shape_node.set('width', str(shape.p_max[0].item() - shape.p_min[0].item())) + shape_node.set('height', str(shape.p_max[1].item() - shape.p_min[1].item())) + elif isinstance(shape, pydiffvg.Ellipse): + shape_node = etree.SubElement(g, 'ellipse') + shape_node.set('cx', str(shape.center[0].item())) + shape_node.set('cy', str(shape.center[1].item())) + shape_node.set('rx', str(shape.radius[0].item())) + shape_node.set('ry', str(shape.radius[1].item())) + else: + raise NotImplementedError(f'shape type: {type(shape)} is not involved in pydiffvg.') + + shape_node.set('stroke-width', str(2 * shape.stroke_width.data.cpu().item())) + if shape_group.fill_color is not None: + if isinstance(shape_group.fill_color, pydiffvg.LinearGradient): + shape_node.set('fill', 'url(#shape_{}_fill)'.format(i)) + else: + c = shape_group.fill_color.data.cpu().numpy() + shape_node.set('fill', 'rgb({}, {}, {})'.format( + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + shape_node.set('opacity', str(c[3])) + else: + shape_node.set('fill', 'none') + if shape_group.stroke_color is not None: + if isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): + shape_node.set('stroke', 'url(#shape_{}_stroke)'.format(i)) + else: + c = 
shape_group.stroke_color.data.cpu().numpy() + shape_node.set('stroke', 'rgb({}, {}, {})'.format( + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + shape_node.set('stroke-opacity', str(c[3])) + shape_node.set('stroke-linecap', 'round') + shape_node.set('stroke-linejoin', 'round') + + with open(filename, "w") as f: + f.write(pydiffvg.prettify(root)) + + @staticmethod + def save_image(img, filename, gamma=1): + if torch.is_tensor(img) and torch.device != 'cpu': + img = img.detach().cpu() + pydiffvg.imwrite(img, filename, gamma=gamma) diff --git a/pytorch_svgrender/libs/__init__.py b/pytorch_svgrender/libs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..47480354ce92364882f0bc5d50415a616a5f7d89 --- /dev/null +++ b/pytorch_svgrender/libs/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: a self consistent system, +# including runner, trainer, loss function, EMA, optimizer, lr scheduler , and common utils. + +from .utils import lazy + +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submodules={'engine', 'metric', 'modules', 'solver', 'utils'}, + submod_attrs={} +) + +__version__ = '0.0.1' diff --git a/pytorch_svgrender/libs/engine/__init__.py b/pytorch_svgrender/libs/engine/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3bb90198a276fe97c8c40b4a6e59ba50651992c9 --- /dev/null +++ b/pytorch_svgrender/libs/engine/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .model_state import ModelState + +__all__ = ['ModelState'] diff --git a/pytorch_svgrender/libs/engine/model_state.py b/pytorch_svgrender/libs/engine/model_state.py new file mode 100644 index 0000000000000000000000000000000000000000..fe520ba567f6b78318562d152ab17808912d6040 --- /dev/null +++ b/pytorch_svgrender/libs/engine/model_state.py @@ -0,0 +1,255 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from typing import Union, List +from pathlib import Path +from datetime import datetime +import logging + +from omegaconf import OmegaConf, DictConfig +from pprint import pprint +import torch +from accelerate.utils import LoggerType +from accelerate import Accelerator + +from ..utils.logging import get_logger + + +class ModelState: + """ + Handling logger and `hugging face` accelerate training + + features: + - Precision + - Device + - Optimizer + - Logger (default: python system print and logging) + - Monitor (default: wandb, tensorboard) + """ + + def __init__( + self, + args: DictConfig, + log_path_suffix: str = None, + ignore_log=False, # whether to create log file or not + ) -> None: + self.args: DictConfig = args + # set cfg + self.state_cfg = args.state + self.x_cfg = args.x + + """check valid""" + mixed_precision = self.state_cfg.get("mprec") + # Bug: omegaconf convert 'no' to false + mixed_precision = "no" if type(mixed_precision) == bool else mixed_precision + + """create working space""" + # rule: ['./config'. 
'method_name', 'exp_name.yaml'] + # -> result_path: ./runs/{method_name}-{exp_name}, as a base folder + now_time = datetime.now().strftime('%Y-%m-%d-%H-%M') + results_folder = self.args.get("result_path", None) + if results_folder is None: + self.result_path = Path("./workdir") / f"{self.x_cfg.method}-{now_time}" + else: + self.result_path = Path(results_folder) / f"{self.x_cfg.method}-{now_time}" + + # update result_path: ./runs/{method_name}-{exp_name}/{log_path_suffix} + # noting: can be understood as "results dir / methods / ablation study / your result" + config_name_only = str(self.x_cfg.method).split(".")[0] + if log_path_suffix is not None: + self.result_path = self.result_path / f"{config_name_only}-{log_path_suffix}" + else: + self.result_path = self.result_path / f"{config_name_only}" + + """init visualized tracker""" + # TODO: monitor with WANDB or TENSORBOARD + self.log_with = [] + # if self.state_cfg.wandb: + # self.log_with.append(LoggerType.WANDB) + # if self.state_cfg.tensorboard: + # self.log_with.append(LoggerType.TENSORBOARD) + + """HuggingFace Accelerator""" + self.accelerator = Accelerator( + device_placement=True, + mixed_precision=mixed_precision, + cpu=True if self.state_cfg.cpu else False, + log_with=None if len(self.log_with) == 0 else self.log_with, + project_dir=self.result_path / "vis", + ) + + """logs""" + if self.accelerator.is_local_main_process: + # logging + self.log = logging.getLogger(__name__) + + # log results in a folder periodically + self.result_path.mkdir(parents=True, exist_ok=True) + if not ignore_log: + self.logger = get_logger( + logs_dir=self.result_path.as_posix(), + file_name=f"{now_time}-{args.seed}-log.txt" + ) + + print("==> system args: ") + sys_cfg = OmegaConf.masked_copy(args, ["x"]) + print(sys_cfg) + print("==> yaml config args: ") + print(self.x_cfg) + + print("\n***** Model State *****") + print(f"-> Mixed Precision: {mixed_precision}, AMP: {self.accelerator.native_amp}") + print(f"-> Weight dtype: {self.weight_dtype}") + + if self.accelerator.scaler_handler is not None and self.accelerator.scaler_handler.enabled: + print(f"-> Enabled GradScaler: {self.accelerator.scaler_handler.to_kwargs()}") + + print(f"-> Working Space: '{self.result_path}'") + + """glob step""" + self.step = 0 + + """log process""" + self.accelerator.wait_for_everyone() + print(f'Process {self.accelerator.process_index} using device: {self.accelerator.device}') + + self.print("-> state initialization complete \n") + + @property + def device(self): + return self.accelerator.device + + @property + def is_main_process(self): + return self.accelerator.is_main_process + + @property + def weight_dtype(self): + weight_dtype = torch.float32 + if self.accelerator.mixed_precision == "fp16": + weight_dtype = torch.float16 + elif self.accelerator.mixed_precision == "bf16": + weight_dtype = torch.bfloat16 + return weight_dtype + + @property + def n_gpus(self): + return self.accelerator.num_processes + + @property + def no_decay_params_names(self): + no_decay = [ + "bn", "LayerNorm", "GroupNorm", + ] + return no_decay + + def no_decay_params(self, model, weight_decay): + """optimization tricks""" + optimizer_grouped_parameters = [ + { + "params": [ + p for n, p in model.named_parameters() + if not any(nd in n for nd in self.no_decay_params_names) + ], + "weight_decay": weight_decay, + }, + { + "params": [ + p for n, p in model.named_parameters() + if any(nd in n for nd in self.no_decay_params_names) + ], + "weight_decay": 0.0, + }, + ] + return optimizer_grouped_parameters 
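+    # Usage sketch for the grouping above: the returned list can be passed
+    # directly to a torch optimizer. `model`, the learning rate and the weight
+    # decay below are illustrative placeholders, not values used elsewhere in
+    # this repository.
+    #
+    #   >>> state = ModelState(args)
+    #   >>> param_groups = state.no_decay_params(model, weight_decay=1e-2)
+    #   >>> optimizer = torch.optim.AdamW(param_groups, lr=1e-4)
+    #
+    # Parameters whose names contain "bn", "LayerNorm" or "GroupNorm" fall into
+    # the second group and receive no weight decay.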
+ + def optimized_params(self, model: torch.nn.Module, verbose=True) -> List: + """return parameters if `requires_grad` is True + + Args: + model: pytorch models + verbose: log optimized parameters + + Examples: + >>> params_optimized = self.optimized_params(uvit, verbose=True) + >>> optimizer = torch.optim.AdamW(params_optimized, lr=1e-3) + + Returns: + a list of parameters + """ + params_optimized = [] + for key, value in model.named_parameters(): + if value.requires_grad: + params_optimized.append(value) + if verbose: + self.print("\t {}, {}, {}".format(key, value.numel(), value.shape)) + return params_optimized + + def save_everything(self, fpath: str): + """Saving and loading the model, optimizer, RNG generators, and the GradScaler.""" + if not self.accelerator.is_main_process: + return + self.accelerator.save_state(fpath) + + def load_save_everything(self, fpath: str): + """Loading the model, optimizer, RNG generators, and the GradScaler.""" + self.accelerator.load_state(fpath) + + def save(self, milestone: Union[str, float, int], checkpoint: object) -> None: + if not self.accelerator.is_main_process: + return + + torch.save(checkpoint, self.result_path / f'model-{milestone}.pt') + + def save_in(self, root: Union[str, Path], checkpoint: object) -> None: + if not self.accelerator.is_main_process: + return + + torch.save(checkpoint, root) + + def load_ckpt_model_only(self, model: torch.nn.Module, path: Union[str, Path], rm_module_prefix: bool = False): + ckpt = torch.load(path, map_location=self.device) + + unwrapped_model = self.accelerator.unwrap_model(model) + if rm_module_prefix: + unwrapped_model.load_state_dict({k.replace('module.', ''): v for k, v in ckpt.items()}) + else: + unwrapped_model.load_state_dict(ckpt) + return unwrapped_model + + def load_shared_weights(self, model: torch.nn.Module, path: Union[str, Path]): + ckpt = torch.load(path, map_location=self.accelerator.device) + self.print(f"pretrained_dict len: {len(ckpt)}") + unwrapped_model = self.accelerator.unwrap_model(model) + model_dict = unwrapped_model.state_dict() + pretrained_dict = {k: v for k, v in ckpt.items() if k in model_dict} + model_dict.update(pretrained_dict) + unwrapped_model.load_state_dict(model_dict, strict=False) + self.print(f"selected pretrained_dict: {len(model_dict)}") + return unwrapped_model + + def print(self, *args, **kwargs): + """Use in replacement of `print()` to only print once per server.""" + self.accelerator.print(*args, **kwargs) + + def pretty_print(self, msg): + if self.accelerator.is_main_process: + pprint(dict(msg)) + + def close_tracker(self): + self.accelerator.end_training() + + def free_memory(self): + self.accelerator.clear() + + def close(self, msg: str = "Training complete."): + """Use in end of training.""" + self.free_memory() + + if torch.cuda.is_available(): + self.print(f'\nGPU memory usage: {torch.cuda.max_memory_reserved() / 1024 ** 3:.2f} GB') + if len(self.log_with) > 0: + self.close_tracker() + self.print(msg) diff --git a/pytorch_svgrender/libs/metric/ImageReward/ImageReward.py b/pytorch_svgrender/libs/metric/ImageReward/ImageReward.py new file mode 100644 index 0000000000000000000000000000000000000000..1ef556061c770a6f6293d1b259957da77bca6802 --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/ImageReward.py @@ -0,0 +1,175 @@ +''' +@File : ImageReward.py +@Time : 2023/01/28 19:53:00 +@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +@Description: ImageReward Reward model. 
+* Based on CLIP code base and improved-aesthetic-predictor code base +* https://github.com/openai/CLIP +* https://github.com/christophschuhmann/improved-aesthetic-predictor +''' + +import os +import torch +import torch.nn as nn +from PIL import Image +from .models.BLIP.blip_pretrain import BLIP_Pretrain +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize + +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +def _convert_image_to_rgb(image): + return image.convert("RGB") + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + _convert_image_to_rgb, + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +class MLP(nn.Module): + def __init__(self, input_size): + super().__init__() + self.input_size = input_size + + self.layers = nn.Sequential( + nn.Linear(self.input_size, 1024), + #nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(1024, 128), + #nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(128, 64), + #nn.ReLU(), + nn.Dropout(0.1), + nn.Linear(64, 16), + #nn.ReLU(), + nn.Linear(16, 1) + ) + + # initial MLP param + for name, param in self.layers.named_parameters(): + if 'weight' in name: + nn.init.normal_(param, mean=0.0, std=1.0/(self.input_size+1)) + if 'bias' in name: + nn.init.constant_(param, val=0) + + def forward(self, input): + return self.layers(input) + + +class ImageReward(nn.Module): + def __init__(self, med_config, device='cpu'): + super().__init__() + self.device = device + + self.blip = BLIP_Pretrain(image_size=224, vit='large', med_config=med_config) + self.preprocess = _transform(224) + self.mlp = MLP(768) + + self.mean = 0.16717362830052426 + self.std = 1.0333394966054072 + + + def score_gard(self, prompt_ids, prompt_attention_mask, image): + + image_embeds = self.blip.visual_encoder(image) + # text encode cross attention with image + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(self.device) + text_output = self.blip.text_encoder(prompt_ids, + attention_mask = prompt_attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True, + ) + + txt_features = text_output.last_hidden_state[:,0,:] # (feature_dim) + rewards = self.mlp(txt_features) + rewards = (rewards - self.mean) / self.std + + return rewards + + + def score(self, prompt, image): + + if (type(image).__name__=='list'): + _, rewards = self.inference_rank(prompt, image) + return rewards + + # text encode + text_input = self.blip.tokenizer(prompt, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(self.device) + + # image encode + if isinstance(image, Image.Image): + pil_image = image + elif isinstance(image, str): + if os.path.isfile(image): + pil_image = Image.open(image) + else: + raise TypeError(r'This image parameter type has not been supportted yet. 
Please pass PIL.Image or file path str.') + + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_embeds = self.blip.visual_encoder(image) + + # text encode cross attention with image + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(self.device) + text_output = self.blip.text_encoder(text_input.input_ids, + attention_mask = text_input.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True, + ) + + txt_features = text_output.last_hidden_state[:,0,:].float() # (feature_dim) + rewards = self.mlp(txt_features) + rewards = (rewards - self.mean) / self.std + + return rewards.detach().cpu().numpy().item() + + + def inference_rank(self, prompt, generations_list): + + text_input = self.blip.tokenizer(prompt, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(self.device) + + txt_set = [] + for generation in generations_list: + # image encode + if isinstance(generation, Image.Image): + pil_image = generation + elif isinstance(generation, str): + if os.path.isfile(generation): + pil_image = Image.open(generation) + else: + raise TypeError(r'This image parameter type has not been supportted yet. Please pass PIL.Image or file path str.') + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_embeds = self.blip.visual_encoder(image) + + # text encode cross attention with image + image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(self.device) + text_output = self.blip.text_encoder(text_input.input_ids, + attention_mask = text_input.attention_mask, + encoder_hidden_states = image_embeds, + encoder_attention_mask = image_atts, + return_dict = True, + ) + txt_set.append(text_output.last_hidden_state[:,0,:]) + + txt_features = torch.cat(txt_set, 0).float() # [image_num, feature_dim] + rewards = self.mlp(txt_features) # [image_num, 1] + rewards = (rewards - self.mean) / self.std + rewards = torch.squeeze(rewards) + _, rank = torch.sort(rewards, dim=0, descending=True) + _, indices = torch.sort(rank, dim=0) + indices = indices + 1 + + return indices.detach().cpu().numpy().tolist(), rewards.detach().cpu().numpy().tolist() \ No newline at end of file diff --git a/pytorch_svgrender/libs/metric/ImageReward/ReFL.py b/pytorch_svgrender/libs/metric/ImageReward/ReFL.py new file mode 100644 index 0000000000000000000000000000000000000000..7c1aa88f063ce4dc5a4801da479d963f885c466d --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/ReFL.py @@ -0,0 +1,824 @@ +''' +@File : ReFL.py +@Time : 2023/05/01 19:36:00 +@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +@Description: ReFL Algorithm. 
+* Based on diffusers code base +* https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py +''' + +import argparse +import logging +import math +import os +import random +from pathlib import Path + +import accelerate +import numpy as np +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +import transformers +from accelerate import Accelerator +from accelerate.logging import get_logger +from accelerate.utils import ProjectConfiguration, set_seed +from datasets import load_dataset +from huggingface_hub import create_repo, upload_folder +from packaging import version +from tqdm.auto import tqdm +from transformers import CLIPTextModel, CLIPTokenizer + +from PIL import Image +import ImageReward as RM + +from torchvision.transforms import Compose, Resize, CenterCrop, Normalize +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + +import diffusers +from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel +from diffusers.optimization import get_scheduler +from diffusers.training_utils import EMAModel +from diffusers.utils import check_min_version, deprecate, is_wandb_available +from diffusers.utils.import_utils import is_xformers_available + + +if is_wandb_available(): + import wandb + + +# Will error if the minimal version of diffusers is not installed. Remove at your own risks. +check_min_version("0.16.0.dev0") + +logger = get_logger(__name__, log_level="INFO") + +DATASET_NAME_MAPPING = { + "refl": ("image", "text"), +} + + +def parse_args(): + parser = argparse.ArgumentParser(description="Simple example of a training script.") + parser.add_argument( + "--grad_scale", type=float, default=1e-3, help="Scale divided for grad loss value." + ) + parser.add_argument( + "--input_pertubation", type=float, default=0, help="The scale of input pretubation. Recommended 0.1." + ) + parser.add_argument( + "--revision", + type=str, + default=None, + required=False, + help="Revision of pretrained model identifier from huggingface.co/models.", + ) + parser.add_argument( + "--dataset_name", + type=str, + default=None, + help=( + "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," + " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," + " or to a folder containing files that 🤗 Datasets can understand." + ), + ) + parser.add_argument( + "--dataset_config_name", + type=str, + default=None, + help="The config of the Dataset, leave as None if there's only one config.", + ) + parser.add_argument( + "--image_column", type=str, default="image", help="The column of the dataset containing an image." + ) + parser.add_argument( + "--caption_column", + type=str, + default="text", + help="The column of the dataset containing a caption or a list of captions.", + ) + parser.add_argument( + "--max_train_samples", + type=int, + default=None, + help=( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." 
+ ), + ) + parser.add_argument( + "--validation_prompts", + type=str, + default=None, + nargs="+", + help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), + ) + parser.add_argument( + "--output_dir", + type=str, + default="checkpoint/refl", + help="The output directory where the model predictions and checkpoints will be written.", + ) + parser.add_argument( + "--cache_dir", + type=str, + default=None, + help="The directory where the downloaded models and datasets will be stored.", + ) + parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") + parser.add_argument( + "--resolution", + type=int, + default=512, + help=( + "The resolution for input images, all the images in the train/validation dataset will be resized to this" + " resolution" + ), + ) + parser.add_argument( + "--center_crop", + default=False, + action="store_true", + help=( + "Whether to center crop the input images to the resolution. If not set, the images will be randomly" + " cropped. The images will be resized to the resolution first before cropping." + ), + ) + parser.add_argument( + "--random_flip", + action="store_true", + help="whether to randomly flip images horizontally", + ) + parser.add_argument( + "--train_batch_size", type=int, default=2, help="Batch size (per device) for the training dataloader." + ) + parser.add_argument("--num_train_epochs", type=int, default=100) + parser.add_argument( + "--max_train_steps", + type=int, + default=100, + help="Total number of training steps to perform. If provided, overrides num_train_epochs.", + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=4, + help="Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--gradient_checkpointing", + action="store_true", + help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=1e-5, + help="Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument( + "--scale_lr", + action="store_true", + default=False, + help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", + ) + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help=( + 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' + ' "constant", "constant_with_warmup"]' + ), + ) + parser.add_argument( + "--lr_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." + ) + parser.add_argument( + "--snr_gamma", + type=float, + default=None, + help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " + "More details here: https://arxiv.org/abs/2303.09556.", + ) + parser.add_argument( + "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." + ) + parser.add_argument( + "--allow_tf32", + action="store_true", + help=( + "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see" + " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" + ), + ) + parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") + parser.add_argument( + "--non_ema_revision", + type=str, + default=None, + required=False, + help=( + "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" + " remote repository specified with --pretrained_model_name_or_path." + ), + ) + parser.add_argument( + "--dataloader_num_workers", + type=int, + default=0, + help=( + "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." + ), + ) + parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") + parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") + parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") + parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") + parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") + parser.add_argument( + "--hub_model_id", + type=str, + default=None, + help="The name of the repository to keep in sync with the local `output_dir`.", + ) + parser.add_argument( + "--logging_dir", + type=str, + default="logs", + help=( + "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" + " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." + ), + ) + parser.add_argument( + "--mixed_precision", + type=str, + default=None, + choices=["no", "fp16", "bf16"], + help=( + "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" + " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" + " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." + ), + ) + parser.add_argument( + "--report_to", + type=str, + default="tensorboard", + help=( + 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' + ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' + ), + ) + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument( + "--checkpointing_steps", + type=int, + default=100, + help=( + "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" + " training using `--resume_from_checkpoint`." + ), + ) + parser.add_argument( + "--checkpoints_total_limit", + type=int, + default=None, + help=( + "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." + " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" + " for more docs" + ), + ) + parser.add_argument( + "--resume_from_checkpoint", + type=str, + default=None, + help=( + "Whether training should be resumed from a previous checkpoint. 
Use a path saved by" + ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' + ), + ) + parser.add_argument( + "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." + ) + parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") + parser.add_argument( + "--validation_epochs", + type=int, + default=5, + help="Run validation every X epochs.", + ) + parser.add_argument( + "--tracker_project_name", + type=str, + default="text2image-refl", + help=( + "The `project_name` argument passed to Accelerator.init_trackers for" + " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" + ), + ) + + args = parser.parse_args() + env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) + if env_local_rank != -1 and env_local_rank != args.local_rank: + args.local_rank = env_local_rank + + # default to using the same revision for the non-ema model if not specified + if args.non_ema_revision is None: + args.non_ema_revision = args.revision + + return args + + +class Trainer(object): + + def __init__(self, pretrained_model_name_or_path, train_data_dir, args): + + self.pretrained_model_name_or_path = pretrained_model_name_or_path + self.train_data_dir = train_data_dir + + # Sanity checks + if args.dataset_name is None and self.train_data_dir is None: + raise ValueError("Need either a dataset name or a training folder.") + + if args.non_ema_revision is not None: + deprecate( + "non_ema_revision!=None", + "0.15.0", + message=( + "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" + " use `--variant=non_ema` instead." + ), + ) + logging_dir = os.path.join(args.output_dir, args.logging_dir) + + accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit) + + self.accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision, + log_with=args.report_to, + logging_dir=logging_dir, + project_config=accelerator_project_config, + ) + + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + logger.info(self.accelerator.state, main_process_only=False) + if self.accelerator.is_local_main_process: + transformers.utils.logging.set_verbosity_warning() + diffusers.utils.logging.set_verbosity_info() + else: + transformers.utils.logging.set_verbosity_error() + diffusers.utils.logging.set_verbosity_error() + + # If passed along, set the training seed now. + if args.seed is not None: + set_seed(args.seed) + + # Handle the repository creation + if self.accelerator.is_main_process: + if args.output_dir is not None: + os.makedirs(args.output_dir, exist_ok=True) + + if args.push_to_hub: + self.repo_id = create_repo( + repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token + ).repo_id + + # Load scheduler, tokenizer and models. 
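+        # In this ReFL setup only the UNet ends up being optimized: the VAE,
+        # the CLIP text encoder and the ImageReward reward model loaded below
+        # are frozen right away with `requires_grad_(False)`.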
+ self.noise_scheduler = DDPMScheduler.from_pretrained(self.pretrained_model_name_or_path, subfolder="scheduler") + tokenizer = CLIPTokenizer.from_pretrained( + self.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision + ) + self.text_encoder = CLIPTextModel.from_pretrained( + self.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision + ) + self.vae = AutoencoderKL.from_pretrained(self.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) + self.unet = UNet2DConditionModel.from_pretrained( + self.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision + ) + self.reward_model = RM.load("ImageReward-v1.0", device=self.accelerator.device) + + # Freeze vae and text_encoder + self.vae.requires_grad_(False) + self.text_encoder.requires_grad_(False) + self.reward_model.requires_grad_(False) + + # Create EMA for the unet. + if args.use_ema: + self.ema_unet = UNet2DConditionModel.from_pretrained( + self.pretrained_model_name_or_path, subfolder="unet", revision=args.revision + ) + self.ema_unet = EMAModel(self.ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=self.ema_unet.config) + + if args.enable_xformers_memory_efficient_attention: + if is_xformers_available(): + import xformers + + xformers_version = version.parse(xformers.__version__) + if xformers_version == version.parse("0.0.16"): + logger.warn( + "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." + ) + self.unet.enable_xformers_memory_efficient_attention() + else: + raise ValueError("xformers is not available. Make sure it is installed correctly") + + # `accelerate` 0.16.0 will have better support for customized saving + if version.parse(accelerate.__version__) >= version.parse("0.16.0"): + # create custom saving & loading hooks so that `self.accelerator.save_state(...)` serializes in a nice format + def save_model_hook(models, weights, output_dir): + if args.use_ema: + self.ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) + + for i, model in enumerate(models): + model.save_pretrained(os.path.join(output_dir, "unet")) + + # make sure to pop weight so that corresponding model is not saved again + weights.pop() + + def load_model_hook(models, input_dir): + if args.use_ema: + load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) + self.ema_unet.load_state_dict(load_model.state_dict()) + self.ema_unet.to(self.accelerator.device) + del load_model + + for i in range(len(models)): + # pop models so that they are not loaded again + model = models.pop() + + # load diffusers style into model + load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") + model.register_to_config(**load_model.config) + + model.load_state_dict(load_model.state_dict()) + del load_model + + self.accelerator.register_save_state_pre_hook(save_model_hook) + self.accelerator.register_load_state_pre_hook(load_model_hook) + + if args.gradient_checkpointing: + self.unet.enable_gradient_checkpointing() + + # Enable TF32 for faster training on Ampere GPUs, + # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices + if args.allow_tf32: + torch.backends.cuda.matmul.allow_tf32 = True + + if args.scale_lr: + args.learning_rate = ( + args.learning_rate * args.gradient_accumulation_steps 
* args.train_batch_size * self.accelerator.num_processes + ) + + # Initialize the optimizer + if args.use_8bit_adam: + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError( + "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" + ) + + optimizer_cls = bnb.optim.AdamW8bit + else: + optimizer_cls = torch.optim.AdamW + + self.optimizer = optimizer_cls( + self.unet.parameters(), + lr=args.learning_rate, + betas=(args.adam_beta1, args.adam_beta2), + weight_decay=args.adam_weight_decay, + eps=args.adam_epsilon, + ) + + # Get the datasets: you can either provide your own training and evaluation files (see below) + # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). + + # In distributed training, the load_dataset function guarantees that only one local process can concurrently + # download the dataset. + if args.dataset_name is not None: + # Downloading and loading a dataset from the hub. + dataset = load_dataset( + args.dataset_name, + args.dataset_config_name, + cache_dir=args.cache_dir, + ) + else: + data_files = {} + data_files["train"] = self.train_data_dir + dataset = load_dataset( + "json", + data_files=data_files, + cache_dir=args.cache_dir, + ) + # See more about loading custom images at + # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + column_names = dataset["train"].column_names + + # Get the column names for input/target. + dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) + if args.image_column is None: + image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] + else: + image_column = args.image_column + if image_column not in column_names: + raise ValueError( + f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" + ) + if args.caption_column is None: + caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] + else: + caption_column = args.caption_column + if caption_column not in column_names: + raise ValueError( + f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" + ) + + # Preprocessing the datasets. + # We need to tokenize input captions and transform the images. + def tokenize_captions(examples, is_train=True): + captions = [] + for caption in examples[caption_column]: + if isinstance(caption, str): + captions.append(caption) + elif isinstance(caption, (list, np.ndarray)): + # take a random caption if there are multiple + captions.append(random.choice(caption) if is_train else caption[0]) + else: + raise ValueError( + f"Caption column `{caption_column}` should contain either strings or lists of strings." 
+ ) + inputs = tokenizer( + captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" + ) + return inputs.input_ids + + def preprocess_train(examples): + examples["input_ids"] = tokenize_captions(examples) + examples["rm_input_ids"] = self.reward_model.blip.tokenizer(examples[caption_column], padding='max_length', truncation=True, max_length=35, return_tensors="pt").input_ids + examples["rm_attention_mask"] = self.reward_model.blip.tokenizer(examples[caption_column], padding='max_length', truncation=True, max_length=35, return_tensors="pt").attention_mask + return examples + + with self.accelerator.main_process_first(): + if args.max_train_samples is not None: + dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) + # Set the training transforms + self.train_dataset = dataset["train"].with_transform(preprocess_train) + + def collate_fn(examples): + input_ids = torch.stack([example["input_ids"] for example in examples]) + rm_input_ids = torch.stack([example["rm_input_ids"] for example in examples]) + rm_attention_mask = torch.stack([example["rm_attention_mask"] for example in examples]) + input_ids = input_ids.view(-1, input_ids.shape[-1]) + rm_input_ids = rm_input_ids.view(-1, rm_input_ids.shape[-1]) + rm_attention_mask = rm_attention_mask.view(-1, rm_attention_mask.shape[-1]) + return {"input_ids": input_ids, "rm_input_ids": rm_input_ids, "rm_attention_mask": rm_attention_mask} + + # DataLoaders creation: + self.train_dataloader = torch.utils.data.DataLoader( + self.train_dataset, + shuffle=True, + collate_fn=collate_fn, + batch_size=args.train_batch_size, + num_workers=args.dataloader_num_workers, + ) + + # Scheduler and math around the number of training steps. + overrode_max_train_steps = False + self.num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / args.gradient_accumulation_steps) + if args.max_train_steps is None: + args.max_train_steps = args.num_train_epochs * self.num_update_steps_per_epoch + overrode_max_train_steps = True + + self.lr_scheduler = get_scheduler( + args.lr_scheduler, + optimizer=self.optimizer, + num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + ) + + # Prepare everything with our `self.accelerator`. + self.unet, self.optimizer, self.train_dataloader, self.lr_scheduler = self.accelerator.prepare( + self.unet, self.optimizer, self.train_dataloader, self.lr_scheduler + ) + + if args.use_ema: + self.ema_unet.to(self.accelerator.device) + + # For mixed precision training we cast the text_encoder and vae weights to half-precision + # as these models are only used for inference, keeping weights in full precision is not required. + self.weight_dtype = torch.float32 + if self.accelerator.mixed_precision == "fp16": + self.weight_dtype = torch.float16 + elif self.accelerator.mixed_precision == "bf16": + self.weight_dtype = torch.bfloat16 + + # Move text_encode and vae to gpu and cast to self.weight_dtype + self.text_encoder.to(self.accelerator.device, dtype=self.weight_dtype) + self.vae.to(self.accelerator.device, dtype=self.weight_dtype) + self.reward_model.to(self.accelerator.device, dtype=self.weight_dtype) + + # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
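+        # Worked example with hypothetical numbers: for 1,000 training samples,
+        # train_batch_size=2 and gradient_accumulation_steps=4, the dataloader
+        # yields 500 batches per epoch, so num_update_steps_per_epoch =
+        # ceil(500 / 4) = 125 optimizer updates per epoch, and max_train_steps
+        # defaults to num_train_epochs * 125 when it is not set explicitly.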
+ self.num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / args.gradient_accumulation_steps) + if overrode_max_train_steps: + args.max_train_steps = args.num_train_epochs * self.num_update_steps_per_epoch + # Afterwards we recalculate our number of training epochs + args.num_train_epochs = math.ceil(args.max_train_steps / self.num_update_steps_per_epoch) + + # We need to initialize the trackers we use, and also store our configuration. + # The trackers initializes automatically on the main process. + if self.accelerator.is_main_process: + tracker_config = dict(vars(args)) + tracker_config.pop("validation_prompts") + self.accelerator.init_trackers(args.tracker_project_name, tracker_config) + + + def train(self, args): + + # Train! + total_batch_size = args.train_batch_size * self.accelerator.num_processes * args.gradient_accumulation_steps + + logger.info("***** Running training *****") + logger.info(f" Num examples = {len(self.train_dataset)}") + logger.info(f" Num Epochs = {args.num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") + logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") + logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {args.max_train_steps}") + global_step = 0 + first_epoch = 0 + + # Potentially load in the weights and states from a previous save + if args.resume_from_checkpoint: + if args.resume_from_checkpoint != "latest": + path = os.path.basename(args.resume_from_checkpoint) + else: + # Get the most recent checkpoint + dirs = os.listdir(args.output_dir) + dirs = [d for d in dirs if d.startswith("checkpoint")] + dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) + path = dirs[-1] if len(dirs) > 0 else None + + if path is None: + self.accelerator.print( + f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." + ) + args.resume_from_checkpoint = None + else: + self.accelerator.print(f"Resuming from checkpoint {path}") + self.accelerator.load_state(os.path.join(args.output_dir, path)) + global_step = int(path.split("-")[1]) + + resume_global_step = global_step * args.gradient_accumulation_steps + first_epoch = global_step // self.num_update_steps_per_epoch + resume_step = resume_global_step % (self.num_update_steps_per_epoch * args.gradient_accumulation_steps) + + # Only show the progress bar once on each machine. 
+ progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not self.accelerator.is_local_main_process) + progress_bar.set_description("Steps") + + for epoch in range(first_epoch, args.num_train_epochs): + self.unet.train() + train_loss = 0.0 + for step, batch in enumerate(self.train_dataloader): + # Skip steps until we reach the resumed step + if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: + if step % args.gradient_accumulation_steps == 0: + progress_bar.update(1) + continue + + with self.accelerator.accumulate(self.unet): + encoder_hidden_states = self.text_encoder(batch["input_ids"])[0] + latents = torch.randn((args.train_batch_size, 4, 64, 64), device=self.accelerator.device) + + self.noise_scheduler.set_timesteps(40, device=self.accelerator.device) + timesteps = self.noise_scheduler.timesteps + + mid_timestep = random.randint(30, 39) + + for i, t in enumerate(timesteps[:mid_timestep]): + with torch.no_grad(): + latent_model_input = latents + latent_model_input = self.noise_scheduler.scale_model_input(latent_model_input, t) + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=encoder_hidden_states, + ).sample + latents = self.noise_scheduler.step(noise_pred, t, latents).prev_sample + + latent_model_input = latents + latent_model_input = self.noise_scheduler.scale_model_input(latent_model_input, timesteps[mid_timestep]) + noise_pred = self.unet( + latent_model_input, + timesteps[mid_timestep], + encoder_hidden_states=encoder_hidden_states, + ).sample + pred_original_sample = self.noise_scheduler.step(noise_pred, timesteps[mid_timestep], latents).pred_original_sample.to(self.weight_dtype) + + pred_original_sample = 1 / self.vae.config.scaling_factor * pred_original_sample + image = self.vae.decode(pred_original_sample.to(self.weight_dtype)).sample + image = (image / 2 + 0.5).clamp(0, 1) + + # image encode + def _transform(): + return Compose([ + Resize(224, interpolation=BICUBIC), + CenterCrop(224), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + rm_preprocess = _transform() + image = rm_preprocess(image).to(self.accelerator.device) + + rewards = self.reward_model.score_gard(batch["rm_input_ids"], batch["rm_attention_mask"], image) + loss = F.relu(-rewards+2) + loss = loss.mean() * args.grad_scale + + # Gather the losses across all processes for logging (if we use distributed training). 
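+ # gather() collects the per-process loss (repeated to the batch size) onto every process; its mean is accumulated into train_loss, divided by gradient_accumulation_steps so the logged value reflects one optimizer step.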
+ avg_loss = self.accelerator.gather(loss.repeat(args.train_batch_size)).mean() + train_loss += avg_loss.item() / args.gradient_accumulation_steps + + # Backpropagate + self.accelerator.backward(loss) + if self.accelerator.sync_gradients: + self.accelerator.clip_grad_norm_(self.unet.parameters(), args.max_grad_norm) + self.optimizer.step() + self.lr_scheduler.step() + self.optimizer.zero_grad() + + # Checks if the self.accelerator has performed an optimization step behind the scenes + if self.accelerator.sync_gradients: + if args.use_ema: + self.ema_unet.step(self.unet.parameters()) + progress_bar.update(1) + global_step += 1 + self.accelerator.log({"train_loss": train_loss}, step=global_step) + train_loss = 0.0 + + if global_step % args.checkpointing_steps == 0: + if self.accelerator.is_main_process: + save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") + self.accelerator.save_state(save_path) + logger.info(f"Saved state to {save_path}") + + logs = {"step_loss": loss.detach().item(), "lr": self.lr_scheduler.get_last_lr()[0]} + progress_bar.set_postfix(**logs) + + if global_step >= args.max_train_steps: + break + + if self.accelerator.is_main_process: + if args.validation_prompts is not None and epoch % args.validation_epochs == 0: + if args.use_ema: + # Store the UNet parameters temporarily and load the EMA parameters to perform inference. + self.ema_unet.store(self.unet.parameters()) + self.ema_unet.copy_to(self.unet.parameters()) + if args.use_ema: + # Switch back to the original UNet parameters. + self.ema_unet.restore(self.unet.parameters()) + + # Create the pipeline using the trained modules and save it. + self.accelerator.wait_for_everyone() + if self.accelerator.is_main_process: + self.unet = self.accelerator.unwrap_model(self.unet) + if args.use_ema: + self.ema_unet.copy_to(self.unet.parameters()) + + pipeline = StableDiffusionPipeline.from_pretrained( + self.pretrained_model_name_or_path, + text_encoder=self.text_encoder, + vae=self.vae, + unet=self.unet, + revision=args.revision, + ) + pipeline.save_pretrained(args.output_dir) + + if args.push_to_hub: + upload_folder( + repo_id=self.repo_id, + folder_path=args.output_dir, + commit_message="End of training", + ignore_patterns=["step_*", "epoch_*"], + ) + + self.accelerator.end_training() + diff --git a/pytorch_svgrender/libs/metric/ImageReward/__init__.py b/pytorch_svgrender/libs/metric/ImageReward/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ec7d51d8e7417474542f05883398390c37a6ba6 --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/__init__.py @@ -0,0 +1,3 @@ +from .utils import * +from .models import * +from .ReFL import * \ No newline at end of file diff --git a/pytorch_svgrender/libs/metric/ImageReward/models/AestheticScore.py b/pytorch_svgrender/libs/metric/ImageReward/models/AestheticScore.py new file mode 100644 index 0000000000000000000000000000000000000000..2ba79c3191ce7735a1db2215302aa100af44fd01 --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/models/AestheticScore.py @@ -0,0 +1,95 @@ +''' +@File : AestheticScore.py +@Time : 2023/02/12 14:54:00 +@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +@Description: AestheticScore. 
+* Based on improved-aesthetic-predictor code base +* https://github.com/christophschuhmann/improved-aesthetic-predictor +''' + +import os +import torch +import torch.nn as nn +import torch.nn.functional as F +from PIL import Image +import clip + + +# if you changed the MLP architecture during training, change it also here: +class MLP(nn.Module): + def __init__(self, input_size): + super().__init__() + self.input_size = input_size + self.layers = nn.Sequential( + nn.Linear(self.input_size, 1024), + #nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(1024, 128), + #nn.ReLU(), + nn.Dropout(0.2), + nn.Linear(128, 64), + #nn.ReLU(), + nn.Dropout(0.1), + + nn.Linear(64, 16), + #nn.ReLU(), + + nn.Linear(16, 1) + ) + + def forward(self, x): + return self.layers(x) + + +class AestheticScore(nn.Module): + def __init__(self, download_root, device='cpu'): + super().__init__() + self.device = device + self.clip_model, self.preprocess = clip.load("ViT-L/14", device=self.device, jit=False, + download_root=download_root) + self.mlp = MLP(768) + + if device == "cpu": + self.clip_model.float() + else: + clip.model.convert_weights(self.clip_model) # Actually this line is unnecessary since clip by default already on float16 + + # have clip.logit_scale require no grad. + self.clip_model.logit_scale.requires_grad_(False) + + def score(self, prompt, image_path): + + if (type(image_path).__name__=='list'): + _, rewards = self.inference_rank(prompt, image_path) + return rewards + + # image encode + pil_image = Image.open(image_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_features = F.normalize(self.clip_model.encode_image(image)).float() + + # score + rewards = self.mlp(image_features) + + return rewards.detach().cpu().numpy().item() + + def inference_rank(self, prompt, generations_list): + + img_set = [] + for generations in generations_list: + # image encode + img_path = generations + pil_image = Image.open(img_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_features = F.normalize(self.clip_model.encode_image(image)) + img_set.append(image_features) + + img_features = torch.cat(img_set, 0).float() # [image_num, feature_dim] + rewards = self.mlp(img_features) + rewards = torch.squeeze(rewards) + _, rank = torch.sort(rewards, dim=0, descending=True) + _, indices = torch.sort(rank, dim=0) + indices = indices + 1 + + return indices.detach().cpu().numpy().tolist(), rewards.detach().cpu().numpy().tolist() \ No newline at end of file diff --git a/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/__init__.py b/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0a617e7dda333d40ed10207f44ccc3857fb18ad4 --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/__init__.py @@ -0,0 +1 @@ +from .blip_pretrain import * \ No newline at end of file diff --git a/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/blip.py b/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/blip.py new file mode 100644 index 0000000000000000000000000000000000000000..0dfdb72ab619587b62357904349358b221f631e4 --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/blip.py @@ -0,0 +1,70 @@ +''' + * Adapted from BLIP (https://github.com/salesforce/BLIP) +''' + +import warnings +warnings.filterwarnings("ignore") + +import torch +import os +from urllib.parse import urlparse +from timm.models.hub import download_cached_file +from transformers import BertTokenizer +from 
.vit import VisionTransformer, interpolate_pos_embed + + +def init_tokenizer(): + tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') + tokenizer.add_special_tokens({'bos_token':'[DEC]'}) + tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']}) + tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0] + return tokenizer + + +def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0): + + assert vit in ['base', 'large'], "vit parameter must be base or large" + if vit=='base': + vision_width = 768 + visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, + num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, + drop_path_rate=0 or drop_path_rate + ) + elif vit=='large': + vision_width = 1024 + visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, + num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, + drop_path_rate=0.1 or drop_path_rate + ) + return visual_encoder, vision_width + + +def is_url(url_or_filename): + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https") + +def load_checkpoint(model,url_or_filename): + if is_url(url_or_filename): + cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True) + checkpoint = torch.load(cached_file, map_location='cpu') + elif os.path.isfile(url_or_filename): + checkpoint = torch.load(url_or_filename, map_location='cpu') + else: + raise RuntimeError('checkpoint url or path is invalid') + + state_dict = checkpoint['model'] + + state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder) + if 'visual_encoder_m.pos_embed' in model.state_dict().keys(): + state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'], + model.visual_encoder_m) + for key in model.state_dict().keys(): + if key in state_dict.keys(): + if state_dict[key].shape!=model.state_dict()[key].shape: + print(key, ": ", state_dict[key].shape, ', ', model.state_dict()[key].shape) + del state_dict[key] + + msg = model.load_state_dict(state_dict,strict=False) + print('load checkpoint from %s'%url_or_filename) + return model,msg + diff --git a/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/blip_pretrain.py b/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/blip_pretrain.py new file mode 100644 index 0000000000000000000000000000000000000000..793cb07944810eebe1d28f26aa19482b0abcf0a5 --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/blip_pretrain.py @@ -0,0 +1,43 @@ +''' + * Adapted from BLIP (https://github.com/salesforce/BLIP) +''' + +import transformers +transformers.logging.set_verbosity_error() + +from torch import nn +import os +from .med import BertConfig, BertModel +from .blip import create_vit, init_tokenizer + +class BLIP_Pretrain(nn.Module): + def __init__(self, + med_config = "med_config.json", + image_size = 224, + vit = 'base', + vit_grad_ckpt = False, + vit_ckpt_layer = 0, + embed_dim = 256, + queue_size = 57600, + momentum = 0.995, + ): + """ + Args: + med_config (str): path for the mixture of encoder-decoder model's configuration file + image_size (int): input image size + vit (str): model size of vision transformer + """ + super().__init__() + + self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer, 0) + + 
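+ # (The text branch built below is a BERT-style "med" encoder; encoder_config.encoder_width is set to the ViT feature width so its cross-attention layers can attend directly to the image embeddings.)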
self.tokenizer = init_tokenizer() + encoder_config = BertConfig.from_json_file(med_config) + encoder_config.encoder_width = vision_width + self.text_encoder = BertModel(config=encoder_config, add_pooling_layer=False) + + text_width = self.text_encoder.config.hidden_size + + self.vision_proj = nn.Linear(vision_width, embed_dim) + self.text_proj = nn.Linear(text_width, embed_dim) + diff --git a/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/med.py b/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/med.py new file mode 100644 index 0000000000000000000000000000000000000000..426f4689833d988526c6e26cd627f30975ab7606 --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/med.py @@ -0,0 +1,947 @@ +''' + * Adapted from BLIP (https://github.com/salesforce/BLIP) + * Based on huggingface code base + * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert +''' + +import math +from typing import Tuple + +import torch +from torch import Tensor, device, nn +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from transformers.activations import ACT2FN +from transformers.file_utils import ( + ModelOutput, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import ( + PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import logging +from transformers.models.bert.configuration_bert import BertConfig + + +logger = logging.get_logger(__name__) + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward( + self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 + ): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + embeddings = inputs_embeds + + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + 
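+ # Shape sketch for BertEmbeddings above (illustrative values, not part of the original file):
+ #   emb = BertEmbeddings(config)                               # config.hidden_size == 768
+ #   out = emb(input_ids=torch.randint(0, 30522, (2, 35)))      # -> (2, 35, 768)
+ # Word and absolute position embeddings are summed element-wise before LayerNorm and dropout.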
+class BertSelfAttention(nn.Module): + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads) + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
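+ # Shapes: (batch, heads, q_len, head_dim) @ (batch, heads, head_dim, k_len) -> (batch, heads, q_len, k_len); in the cross-attention case k_len is the number of image tokens (patches plus [CLS]).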
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
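+ # Dropout zeroes individual attention weights, so a given query simply stops attending to some key positions; the surviving weights are rescaled by 1/(1 - p) during training.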
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = 
self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if self.config.add_cross_attention: + self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + mode=None, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + + if mode=='multimodal': + assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" + + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + mode='multimodal', + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warn( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + mode=mode, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + mode=mode, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = BertConfig + base_model_prefix = "bert" + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """ Initialize the weights """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +class BertModel(BertPreTrainedModel): + """ + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in `Attention is + all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an + input to the forward pass. + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + + def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
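+ # In the 2D decoder branch below the causal mask is lower-triangular, e.g. for seq_length=3 each batch element gets [[1,0,0],[1,1,0],[1,1,1]] before being combined with the padding mask.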
+ if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
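+ # get_extended_attention_mask (defined above) returns an additive mask of shape (batch, 1, 1, seq) or (batch, 1, seq, seq): 0.0 where attention is allowed and -10000.0 at masked positions, cast to self.dtype for fp16 compatibility.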
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, + device, is_decoder) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() + else: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + mode=mode, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + 
return_logits=False, + is_decoder=True, + reduction='mean', + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are + ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + if reduction=='none': + lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + "input_ids": input_ids, + "attention_mask": attention_mask, + "past_key_values": past, + "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), + "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), + "is_decoder": True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past diff --git a/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/vit.py b/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/vit.py new file mode 100644 index 0000000000000000000000000000000000000000..7e5cf430090956461bc64d5ccbe427a71f50f5f2 --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/models/BLIP/vit.py @@ -0,0 +1,301 @@ +''' + * Adapted from BLIP (https://github.com/salesforce/BLIP) + * Based on timm code base + * https://github.com/rwightman/pytorch-image-models/tree/master/timm +''' + +import 
torch +import torch.nn as nn +import torch.nn.functional as F +from functools import partial + +from timm.models.vision_transformer import _cfg, PatchEmbed +from timm.models.registry import register_model +from timm.models.layers import trunc_normal_, DropPath +from timm.models.helpers import named_apply, adapt_input_conv + +from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper + +class Mlp(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim ** -0.5 + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.attn_gradients = None + self.attention_map = None + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def forward(self, x, register_hook=False): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + if register_hook: + self.save_attention_map(attn) + attn.register_hook(self.save_attn_gradients) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = Attention( + dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + if use_grad_checkpointing: + self.attn = checkpoint_wrapper(self.attn) + self.mlp = checkpoint_wrapper(self.mlp) + + def forward(self, x, register_hook=False): + x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class VisionTransformer(nn.Module): + """ Vision Transformer + A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - + https://arxiv.org/abs/2010.11929 + """ + def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, + num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, + drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, + use_grad_checkpointing=False, ckpt_layer=0): + """ + Args: + img_size (int, tuple): input image size + patch_size (int, tuple): patch size + in_chans (int): number of input channels + num_classes (int): number of classes for classification head + embed_dim (int): embedding dimension + depth (int): depth of transformer + num_heads (int): number of attention heads + mlp_ratio (int): ratio of mlp hidden dim to embedding dim + qkv_bias (bool): enable bias for qkv if True + qk_scale (float): override default qk scale of head_dim ** -0.5 if set + representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set + drop_rate (float): dropout rate + attn_drop_rate (float): attention dropout rate + drop_path_rate (float): stochastic depth rate + norm_layer: (nn.Module): normalization layer + """ + super().__init__() + self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + + self.patch_embed = PatchEmbed( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList([ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, + use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer) + ) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + trunc_normal_(self.pos_embed, std=.02) + trunc_normal_(self.cls_token, std=.02) + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'pos_embed', 'cls_token'} + + def forward(self, x, register_blk=-1): + B = x.shape[0] + x = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks + x = torch.cat((cls_tokens, x), dim=1) + + x = x + self.pos_embed[:,:x.size(1),:] + x 
= self.pos_drop(x) + + for i,blk in enumerate(self.blocks): + x = blk(x, register_blk==i) + x = self.norm(x) + + return x + + @torch.jit.ignore() + def load_pretrained(self, checkpoint_path, prefix=''): + _load_weights(self, checkpoint_path, prefix) + + +@torch.no_grad() +def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): + """ Load weights from .npz checkpoints for official Google Brain Flax implementation + """ + import numpy as np + + def _n2p(w, t=True): + if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: + w = w.flatten() + if t: + if w.ndim == 4: + w = w.transpose([3, 2, 0, 1]) + elif w.ndim == 3: + w = w.transpose([2, 0, 1]) + elif w.ndim == 2: + w = w.transpose([1, 0]) + return torch.from_numpy(w) + + w = np.load(checkpoint_path) + if not prefix and 'opt/target/embedding/kernel' in w: + prefix = 'opt/target/' + + if hasattr(model.patch_embed, 'backbone'): + # hybrid + backbone = model.patch_embed.backbone + stem_only = not hasattr(backbone, 'stem') + stem = backbone if stem_only else backbone.stem + stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) + stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) + stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) + if not stem_only: + for i, stage in enumerate(backbone.stages): + for j, block in enumerate(stage.blocks): + bp = f'{prefix}block{i + 1}/unit{j + 1}/' + for r in range(3): + getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) + getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) + getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) + if block.downsample is not None: + block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) + block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) + block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) + embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) + else: + embed_conv_w = adapt_input_conv( + model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) + model.patch_embed.proj.weight.copy_(embed_conv_w) + model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) + model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) + pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) + if pos_embed_w.shape != model.pos_embed.shape: + pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights + pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) + model.pos_embed.copy_(pos_embed_w) + model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) + model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) +# if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: +# model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) +# model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) +# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: +# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) +# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) + for i, block in enumerate(model.blocks.children()): + block_prefix = f'{prefix}Transformer/encoderblock_{i}/' + mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' + block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) + 
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) + block.attn.qkv.weight.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) + block.attn.qkv.bias.copy_(torch.cat([ + _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) + block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) + block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) + for r in range(2): + getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) + getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) + block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) + block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) + + +def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): + # interpolate position embedding + embedding_size = pos_embed_checkpoint.shape[-1] + num_patches = visual_encoder.patch_embed.num_patches + num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches + # height (== width) for the checkpoint position embedding + orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) + # height (== width) for the new position embedding + new_size = int(num_patches ** 0.5) + + if orig_size!=new_size: + # class_token and dist_token are kept unchanged + extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] + # only the position tokens are interpolated + pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] + pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) + pos_tokens = torch.nn.functional.interpolate( + pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) + pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) + new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) + print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2)) + + return new_pos_embed + else: + return pos_embed_checkpoint \ No newline at end of file diff --git a/pytorch_svgrender/libs/metric/ImageReward/models/BLIPScore.py b/pytorch_svgrender/libs/metric/ImageReward/models/BLIPScore.py new file mode 100644 index 0000000000000000000000000000000000000000..2add1269ee9a09624641929c12594ac53e6188d6 --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/models/BLIPScore.py @@ -0,0 +1,99 @@ +''' +@File : BLIPScore.py +@Time : 2023/02/19 20:48:00 +@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +@Description: BLIPScore. 
+* Based on BLIP code base +* https://github.com/salesforce/BLIP +''' + +import os +import torch +import torch.nn as nn +import torch.nn.functional as F +from PIL import Image +from libs.metric.ImageReward.models.BLIP.blip import load_checkpoint +from libs.metric.ImageReward.models.BLIP.blip_pretrain import BLIP_Pretrain +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize + +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +def _convert_image_to_rgb(image): + return image.convert("RGB") + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=BICUBIC), + CenterCrop(n_px), + _convert_image_to_rgb, + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +class BLIPScore(nn.Module): + def __init__(self, med_config, device='cpu'): + super().__init__() + self.device = device + + self.preprocess = _transform(224) + self.blip = BLIP_Pretrain(image_size=224, vit='large', med_config=med_config) + + + def score(self, prompt, image_path): + + if (type(image_path).__name__=='list'): + _, rewards = self.inference_rank(prompt, image_path) + return rewards + + # text encode + text_input = self.blip.tokenizer(prompt, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(self.device) + text_output = self.blip.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text') + txt_feature = F.normalize(self.blip.text_proj(text_output.last_hidden_state[:,0,:])) + + # image encode + pil_image = Image.open(image_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_embeds = self.blip.visual_encoder(image) + image_features = F.normalize(self.blip.vision_proj(image_embeds[:,0,:]), dim=-1) + + # score + rewards = torch.sum(torch.mul(txt_feature, image_features), dim=1, keepdim=True) + + return rewards.detach().cpu().numpy().item() + + + def inference_rank(self, prompt, generations_list): + + text_input = self.blip.tokenizer(prompt, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(self.device) + text_output = self.blip.text_encoder(text_input.input_ids, attention_mask = text_input.attention_mask, mode='text') + txt_feature = F.normalize(self.blip.text_proj(text_output.last_hidden_state[:,0,:])) + + txt_set = [] + img_set = [] + for generations in generations_list: + # image encode + img_path = generations + pil_image = Image.open(img_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_embeds = self.blip.visual_encoder(image) + image_features = F.normalize(self.blip.vision_proj(image_embeds[:,0,:]), dim=-1) + img_set.append(image_features) + txt_set.append(txt_feature) + + txt_features = torch.cat(txt_set, 0).float() # [image_num, feature_dim] + img_features = torch.cat(img_set, 0).float() # [image_num, feature_dim] + rewards = torch.sum(torch.mul(txt_features, img_features), dim=1, keepdim=True) + rewards = torch.squeeze(rewards) + _, rank = torch.sort(rewards, dim=0, descending=True) + _, indices = torch.sort(rank, dim=0) + indices = indices + 1 + + return indices.detach().cpu().numpy().tolist(), rewards.detach().cpu().numpy().tolist() \ No newline at end of file diff --git a/pytorch_svgrender/libs/metric/ImageReward/models/CLIPScore.py b/pytorch_svgrender/libs/metric/ImageReward/models/CLIPScore.py new file mode 100644 index 
0000000000000000000000000000000000000000..8aba714ed0da54704a22e9a34c4c639be9c0aec3 --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/models/CLIPScore.py @@ -0,0 +1,78 @@ +''' +@File : CLIPScore.py +@Time : 2023/02/12 13:14:00 +@Auther : Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +@Description: CLIPScore. +* Based on CLIP code base +* https://github.com/openai/CLIP +''' + +import torch +import torch.nn as nn +import torch.nn.functional as F +from PIL import Image +import clip + +class CLIPScore(nn.Module): + def __init__(self, download_root, device='cpu'): + super().__init__() + self.device = device + self.clip_model, self.preprocess = clip.load("ViT-L/14", device=self.device, jit=False, + download_root=download_root) + + if device == "cpu": + self.clip_model.float() + else: + clip.model.convert_weights(self.clip_model) # Actually this line is unnecessary since clip by default already on float16 + + # have clip.logit_scale require no grad. + self.clip_model.logit_scale.requires_grad_(False) + + + def score(self, prompt, image_path): + + if (type(image_path).__name__=='list'): + _, rewards = self.inference_rank(prompt, image_path) + return rewards + + # text encode + text = clip.tokenize(prompt, truncate=True).to(self.device) + txt_features = F.normalize(self.clip_model.encode_text(text)) + + # image encode + pil_image = Image.open(image_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_features = F.normalize(self.clip_model.encode_image(image)) + + # score + rewards = torch.sum(torch.mul(txt_features, image_features), dim=1, keepdim=True) + + return rewards.detach().cpu().numpy().item() + + + def inference_rank(self, prompt, generations_list): + + text = clip.tokenize(prompt, truncate=True).to(self.device) + txt_feature = F.normalize(self.clip_model.encode_text(text)) + + txt_set = [] + img_set = [] + for generations in generations_list: + # image encode + img_path = generations + pil_image = Image.open(img_path) + image = self.preprocess(pil_image).unsqueeze(0).to(self.device) + image_features = F.normalize(self.clip_model.encode_image(image)) + img_set.append(image_features) + txt_set.append(txt_feature) + + txt_features = torch.cat(txt_set, 0).float() # [image_num, feature_dim] + img_features = torch.cat(img_set, 0).float() # [image_num, feature_dim] + rewards = torch.sum(torch.mul(txt_features, img_features), dim=1, keepdim=True) + rewards = torch.squeeze(rewards) + _, rank = torch.sort(rewards, dim=0, descending=True) + _, indices = torch.sort(rank, dim=0) + indices = indices + 1 + + return indices.detach().cpu().numpy().tolist(), rewards.detach().cpu().numpy().tolist() \ No newline at end of file diff --git a/pytorch_svgrender/libs/metric/ImageReward/models/__init__.py b/pytorch_svgrender/libs/metric/ImageReward/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9ba230b0a38758ee78a4eba7caeedc259a1a4dbb --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/models/__init__.py @@ -0,0 +1,4 @@ +from .AestheticScore import * +from .BLIPScore import * +from .CLIPScore import * +from .BLIP import * \ No newline at end of file diff --git a/pytorch_svgrender/libs/metric/ImageReward/utils.py b/pytorch_svgrender/libs/metric/ImageReward/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fefc3cfdcf376e14721f6e8c727fb62ab831d0dd --- /dev/null +++ b/pytorch_svgrender/libs/metric/ImageReward/utils.py @@ -0,0 +1,165 @@ +''' +@File : utils.py +@Time : 2023/04/05 19:18:00 +@Auther : 
Jiazheng Xu +@Contact : xjz22@mails.tsinghua.edu.cn +* Based on CLIP code base +* https://github.com/openai/CLIP +* Checkpoint of CLIP/BLIP/Aesthetic are from: +* https://github.com/openai/CLIP +* https://github.com/salesforce/BLIP +* https://github.com/christophschuhmann/improved-aesthetic-predictor +''' + +import os +import urllib +from typing import Union, List +from .ImageReward import ImageReward +import torch +from tqdm import tqdm +from huggingface_hub import hf_hub_download +from .models.CLIPScore import CLIPScore +from .models.BLIPScore import BLIPScore +from .models.AestheticScore import AestheticScore + +_MODELS = { + "ImageReward-v1.0": "https://huggingface.co/THUDM/ImageReward/blob/main/ImageReward.pt", +} + + +def available_models() -> List[str]: + """Returns the names of available ImageReward models""" + return list(_MODELS.keys()) + + +def ImageReward_download(url: str, root: str): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + download_target = os.path.join(root, filename) + hf_hub_download(repo_id="THUDM/ImageReward", filename=filename, local_dir=root) + return download_target + + +def load(name: str = "ImageReward-v1.0", device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", download_root: str = None, med_config: str = None): + """Load a ImageReward model + + Parameters + ---------- + name : str + A model name listed by `ImageReward.available_models()`, or the path to a model checkpoint containing the state_dict + + device : Union[str, torch.device] + The device to put the loaded model + + download_root: str + path to download the model files; by default, it uses "~/.cache/ImageReward" + + Returns + ------- + model : torch.nn.Module + The ImageReward model + """ + if name in _MODELS: + model_path = ImageReward_download(_MODELS[name], download_root or os.path.expanduser("~/.cache/ImageReward")) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {available_models()}") + + print('load checkpoint from %s'%model_path) + state_dict = torch.load(model_path, map_location='cpu') + + # med_config + if med_config is None: + med_config = ImageReward_download("https://huggingface.co/THUDM/ImageReward/blob/main/med_config.json", download_root or os.path.expanduser("~/.cache/ImageReward")) + + model = ImageReward(device=device, med_config=med_config).to(device) + msg = model.load_state_dict(state_dict,strict=False) + print("checkpoint loaded") + model.eval() + + return model + + +_SCORES = { + "CLIP": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", + "BLIP": "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large.pth", + "Aesthetic": "https://github.com/christophschuhmann/improved-aesthetic-predictor/raw/main/sac%2Blogos%2Bava1-l14-linearMSE.pth", +} + + +def available_scores() -> List[str]: + """Returns the names of available ImageReward scores""" + return list(_SCORES.keys()) + + +def _download(url: str, root: str): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + return download_target + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with 
tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + return download_target + + +def load_score(name: str = "CLIP", device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", download_root: str = None): + """Load an ImageReward score model + + Parameters + ---------- + name : str + A score name listed by `ImageReward.available_scores()` + + device : Union[str, torch.device] + The device to put the loaded model + + download_root: str + path to download the model files; by default, it uses "~/.cache/ImageReward" + + Returns + ------- + model : torch.nn.Module + The requested score model + """ + model_download_root = download_root or os.path.expanduser("~/.cache/ImageReward") + + if name in _SCORES: + model_path = _download(_SCORES[name], model_download_root) + else: + raise RuntimeError(f"Score {name} not found; available scores = {available_scores()}") + + print('load checkpoint from %s'%model_path) + if name == "BLIP": + state_dict = torch.load(model_path, map_location='cpu') + med_config = ImageReward_download("https://huggingface.co/THUDM/ImageReward/blob/main/med_config.json", model_download_root) + model = BLIPScore(med_config=med_config, device=device).to(device) + model.blip.load_state_dict(state_dict['model'],strict=False) + elif name == "CLIP": + model = CLIPScore(download_root=model_download_root, device=device).to(device) + elif name == "Aesthetic": + state_dict = torch.load(model_path, map_location='cpu') + model = AestheticScore(download_root=model_download_root, device=device).to(device) + model.mlp.load_state_dict(state_dict,strict=False) + else: + raise RuntimeError(f"Score {name} not found; available scores = {available_scores()}") + + print("checkpoint loaded") + model.eval() + + return model diff --git a/pytorch_svgrender/libs/metric/__init__.py b/pytorch_svgrender/libs/metric/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad761f2f5443eb41b15afc4116a66ecdfa9d918 --- /dev/null +++ b/pytorch_svgrender/libs/metric/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: diff --git a/pytorch_svgrender/libs/metric/accuracy.py b/pytorch_svgrender/libs/metric/accuracy.py new file mode 100644 index 0000000000000000000000000000000000000000..96ab47b38acde0e0e2f4cb6d924a90e9daaad681 --- /dev/null +++ b/pytorch_svgrender/libs/metric/accuracy.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + + +def accuracy(output, target, topk=(1,)): + """ + Computes the accuracy over the k top predictions for the specified values of k.
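+    Example (illustrative values): for output logits of shape (2, 5) and a target tensor [3, 1],
+    accuracy(output, target, topk=(1, 5)) returns a list [top-1 acc, top-5 acc], each a
+    1-element tensor scaled to the 0-100 range.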
+ + Args + output: logits or probs (num of batch, num of classes) + target: (num of batch, 1) or (num of batch, ) + topk: list of returned k + + refer: https://github.com/pytorch/examples/blob/master/imagenet/main.py + """ + maxK = max(topk) # get k in top-k + batch_size = target.size(0) + + _, pred = output.topk(k=maxK, dim=1, largest=True, sorted=True) # pred: [num of batch, k] + pred = pred.t() # pred: [k, num of batch] + + # [1, num of batch] -> [k, num_of_batch] : bool + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) + res.append(correct_k.mul_(100.0 / batch_size)) + return res # np.shape(res): [k, 1] diff --git a/pytorch_svgrender/libs/metric/clip_score/__init__.py b/pytorch_svgrender/libs/metric/clip_score/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea7239bd179c2f1b9cefaf7e47d3aff49fe3dde7 --- /dev/null +++ b/pytorch_svgrender/libs/metric/clip_score/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .openaiCLIP_loss import CLIPScoreWrapper + +__all__ = ['CLIPScoreWrapper'] diff --git a/pytorch_svgrender/libs/metric/clip_score/openaiCLIP_loss.py b/pytorch_svgrender/libs/metric/clip_score/openaiCLIP_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..36bb96a9b34853c19c49d4d2b4120ad66bcd3921 --- /dev/null +++ b/pytorch_svgrender/libs/metric/clip_score/openaiCLIP_loss.py @@ -0,0 +1,303 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from typing import Union, List, Tuple +from collections import OrderedDict +from functools import partial + +import numpy as np +import torch +import torch.nn as nn +import torchvision.transforms as transforms + + +class CLIPScoreWrapper(nn.Module): + + def __init__(self, + clip_model_name: str, + download_root: str = None, + device: torch.device = "cuda" if torch.cuda.is_available() else "cpu", + jit: bool = False, + # additional params + visual_score: bool = False, + feats_loss_type: str = None, + feats_loss_weights: List[float] = None, + fc_loss_weight: float = None, + context_length: int = 77): + super().__init__() + + import clip # local import + + # check model info + self.clip_model_name = clip_model_name + self.device = device + self.available_models = clip.available_models() + assert clip_model_name in self.available_models, f"A model backbone: {clip_model_name} that does not exist" + + # load CLIP + self.model, self.preprocess = clip.load(clip_model_name, device, jit=jit, download_root=download_root) + self.model.eval() + + # load tokenize + self.tokenize_fn = partial(clip.tokenize, context_length=context_length) + + # load CLIP visual + self.visual_encoder = VisualEncoderWrapper(self.model, clip_model_name).to(device) + self.visual_encoder.eval() + + # check loss weights + self.visual_score = visual_score + if visual_score: + assert feats_loss_type in ["l1", "l2", "cosine"], f"{feats_loss_type} is not exist." 
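+            # feats_loss_weights must supply one weight per hooked visual layer:
+            # 12 for ViT backbones (one per transformer resblock) and 5 for ResNet
+            # backbones (stem output + 4 residual stages), matching VisualEncoderWrapper below.
+            # A hypothetical setting that weights only the deepest ViT block: [0.0] * 11 + [1.0].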
+ if clip_model_name.startswith("ViT"): assert len(feats_loss_weights) == 12 + if clip_model_name.startswith("RN"): assert len(feats_loss_weights) == 5 + + # load visual loss wrapper + self.visual_loss_fn = CLIPVisualLossWrapper(self.visual_encoder, feats_loss_type, + feats_loss_weights, + fc_loss_weight) + + @property + def input_resolution(self): + return self.model.visual.input_resolution # default: 224 + + @property + def resize(self): # Resize only + return transforms.Compose([self.preprocess.transforms[0]]) + + @property + def normalize(self): + return transforms.Compose([ + self.preprocess.transforms[0], # Resize + self.preprocess.transforms[1], # CenterCrop + self.preprocess.transforms[-1], # Normalize + ]) + + @property + def norm_(self): # Normalize only + return transforms.Compose([self.preprocess.transforms[-1]]) + + def encode_image_layer_wise(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: + semantic_vec, feature_maps = self.visual_encoder(x) + return semantic_vec, feature_maps + + def encode_text(self, text: Union[str, List[str]], norm: bool = True) -> torch.Tensor: + tokens = self.tokenize_fn(text).to(self.device) + text_features = self.model.encode_text(tokens) + if norm: + text_features = text_features.mean(axis=0, keepdim=True) + text_features_norm = text_features / text_features.norm(dim=-1, keepdim=True) + return text_features_norm + return text_features + + def encode_image(self, image: torch.Tensor, norm: bool = True) -> torch.Tensor: + image_features = self.model.encode_image(image) + if norm: + image_features_norm = image_features / image_features.norm(dim=-1, keepdim=True) + return image_features_norm + return image_features + + @torch.no_grad() + def predict(self, + image: torch.Tensor, + text: Union[str, List[str]]) -> Tuple[torch.Tensor, torch.Tensor, np.ndarray]: + image_features = self.model.encode_image(image) + text_tokenize = self.tokenize_fn(text).to(self.device) + text_features = self.model.encode_text(text_tokenize) + logits_per_image, logits_per_text = self.model(image, text) + probs = logits_per_image.softmax(dim=-1).cpu().numpy() + return image_features, text_features, probs + + def compute_text_visual_distance( + self, image: torch.Tensor, text: Union[str, List[str]] + ) -> torch.Tensor: + image_features = self.model.encode_image(image) + text_tokenize = self.tokenize_fn(text).to(self.device) + text_features = self.model.encode_text(text_tokenize) + + image_features_norm = image_features / image_features.norm(dim=-1, keepdim=True) + text_features_norm = text_features / text_features.norm(dim=-1, keepdim=True) + loss = - (image_features_norm @ text_features_norm.T) + return loss.mean() + + def directional_loss(self, src_text, src_img, tar_text, tar_img, thresh=None): + # delta img + img_direction = (tar_img - src_img) + img_direction_norm = img_direction / img_direction.norm(dim=-1, keepdim=True) + # delta text + text_direction = (1 * tar_text - src_text).repeat(tar_img.size(0), 1) + text_direction_norm = text_direction / text_direction.norm(dim=-1, keepdim=True) + # Directional CLIP Loss + loss_dir = (1 - torch.cosine_similarity(img_direction_norm, text_direction_norm, dim=1)) + if thresh is not None: + loss_dir[loss_dir < thresh] = 0 # set value=0 when lt 0 + return loss_dir.mean() + else: + return loss_dir.mean() + + def compute_visual_distance( + self, x: torch.Tensor, y: torch.Tensor, clip_norm: bool = True, + ) -> Tuple[torch.Tensor, List]: + # return a fc loss and the list of feat loss + assert self.visual_score is True + 
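+        # Both inputs must already be batched, CLIP-resolution images (input_resolution is
+        # 224 for the default backbones); the asserts below enforce matching square shapes.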
assert x.shape == y.shape + assert x.shape[-1] == self.input_resolution and x.shape[-2] == self.input_resolution + assert y.shape[-1] == self.input_resolution and y.shape[-2] == self.input_resolution + + if clip_norm: + return self.visual_loss_fn(self.normalize(x), self.normalize(y)) + else: + return self.visual_loss_fn(x, y) + + +class VisualEncoderWrapper(nn.Module): + """ + semantic features and layer by layer feature maps are obtained from CLIP visual encoder. + """ + + def __init__(self, clip_model: nn.Module, clip_model_name: str): + super().__init__() + self.clip_model = clip_model + self.clip_model_name = clip_model_name + + if clip_model_name.startswith("ViT"): + self.feature_maps = OrderedDict() + for i in range(12): # 12 ResBlocks in ViT visual transformer + self.clip_model.visual.transformer.resblocks[i].register_forward_hook( + self.make_hook(i) + ) + + if clip_model_name.startswith("RN"): + layers = list(self.clip_model.visual.children()) + init_layers = torch.nn.Sequential(*layers)[:8] + self.layer1 = layers[8] + self.layer2 = layers[9] + self.layer3 = layers[10] + self.layer4 = layers[11] + self.att_pool2d = layers[12] + + def make_hook(self, name): + def hook(module, input, output): + if len(output.shape) == 3: + # LND -> NLD (B, 77, 768) + self.feature_maps[name] = output.permute(1, 0, 2) + else: + self.feature_maps[name] = output + + return hook + + def _forward_vit(self, x: torch.Tensor) -> Tuple[torch.Tensor, List]: + fc_feature = self.clip_model.encode_image(x).float() + feature_maps = [self.feature_maps[k] for k in range(12)] + + # fc_feature len: 1 ,feature_maps len: 12 + return fc_feature, feature_maps + + def _forward_resnet(self, x: torch.Tensor) -> Tuple[torch.Tensor, List]: + def stem(m, x): + for conv, bn, relu in [(m.conv1, m.bn1, m.relu1), (m.conv2, m.bn2, m.relu2), (m.conv3, m.bn3, m.relu3)]: + x = torch.relu(bn(conv(x))) + x = m.avgpool(x) + return x + + x = x.type(self.clip_model.visual.conv1.weight.dtype) + x = stem(self.clip_model.visual, x) + x1 = self.layer1(x) + x2 = self.layer2(x1) + x3 = self.layer3(x2) + x4 = self.layer4(x3) + y = self.att_pool2d(x4) + + # fc_features len: 1 ,feature_maps len: 5 + return y, [x, x1, x2, x3, x4] + + def forward(self, x) -> Tuple[torch.Tensor, List[torch.Tensor]]: + if self.clip_model_name.startswith("ViT"): + fc_feat, visual_feat_maps = self._forward_vit(x) + if self.clip_model_name.startswith("RN"): + fc_feat, visual_feat_maps = self._forward_resnet(x) + + return fc_feat, visual_feat_maps + + +class CLIPVisualLossWrapper(nn.Module): + """ + Visual Feature Loss + FC loss + """ + + def __init__( + self, + visual_encoder: nn.Module, + feats_loss_type: str = None, + feats_loss_weights: List[float] = None, + fc_loss_weight: float = None, + ): + super().__init__() + self.visual_encoder = visual_encoder + self.feats_loss_weights = feats_loss_weights + self.fc_loss_weight = fc_loss_weight + + self.layer_criterion = layer_wise_distance(feats_loss_type) + + def forward(self, x: torch.Tensor, y: torch.Tensor): + x_fc_feature, x_feat_maps = self.visual_encoder(x) + y_fc_feature, y_feat_maps = self.visual_encoder(y) + + # visual feature loss + if sum(self.feats_loss_weights) == 0: + feats_loss_list = [torch.tensor(0, device=x.device)] + else: + feats_loss = self.layer_criterion(x_feat_maps, y_feat_maps, self.visual_encoder.clip_model_name) + feats_loss_list = [] + for layer, w in enumerate(self.feats_loss_weights): + if w: + feats_loss_list.append(feats_loss[layer] * w) + + # visual fc loss, default: cosine similarity + if 
self.fc_loss_weight == 0: + fc_loss = torch.tensor(0, device=x.device) + else: + fc_loss = (1 - torch.cosine_similarity(x_fc_feature, y_fc_feature, dim=1)).mean() + fc_loss = fc_loss * self.fc_loss_weight + + return fc_loss, feats_loss_list + + +################################################################################# +# layer wise metric # +################################################################################# + +def layer_wise_distance(metric_name: str): + return { + "l1": l1_layer_wise, + "l2": l2_layer_wise, + "cosine": cosine_layer_wise + }.get(metric_name.lower()) + + +def l2_layer_wise(x_features, y_features, clip_model_name): + return [ + torch.square(x_conv - y_conv).mean() + for x_conv, y_conv in zip(x_features, y_features) + ] + + +def l1_layer_wise(x_features, y_features, clip_model_name): + return [ + torch.abs(x_conv - y_conv).mean() + for x_conv, y_conv in zip(x_features, y_features) + ] + + +def cosine_layer_wise(x_features, y_features, clip_model_name): + if clip_model_name.startswith("RN"): + return [ + (1 - torch.cosine_similarity(x_conv, y_conv, dim=1)).mean() + for x_conv, y_conv in zip(x_features, y_features) + ] + return [ + (1 - torch.cosine_similarity(x_conv, y_conv, dim=1)).mean() + for x_conv, y_conv in zip(x_features, y_features) + ] diff --git a/pytorch_svgrender/libs/metric/lpips_origin/__init__.py b/pytorch_svgrender/libs/metric/lpips_origin/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..8cb4332c66e4725112fad88db377e3a50471f3ee --- /dev/null +++ b/pytorch_svgrender/libs/metric/lpips_origin/__init__.py @@ -0,0 +1,3 @@ +from .lpips import LPIPS + +__all__ = ['LPIPS'] diff --git a/pytorch_svgrender/libs/metric/lpips_origin/lpips.py b/pytorch_svgrender/libs/metric/lpips_origin/lpips.py new file mode 100755 index 0000000000000000000000000000000000000000..fa97aad10e2b18fa5b92b913e9def4f5135a83c0 --- /dev/null +++ b/pytorch_svgrender/libs/metric/lpips_origin/lpips.py @@ -0,0 +1,184 @@ +from __future__ import absolute_import + +import os + +import torch +import torch.nn as nn + +from . 
import pretrained_networks as pretrained_torch_models + + +def spatial_average(x, keepdim=True): + return x.mean([2, 3], keepdim=keepdim) + + +def upsample(x): + return nn.Upsample(size=x.shape[2:], mode='bilinear', align_corners=False)(x) + + +def normalize_tensor(in_feat, eps=1e-10): + norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1, keepdim=True)) + return in_feat / (norm_factor + eps) + + +# Learned perceptual metric +class LPIPS(nn.Module): + + def __init__(self, + pretrained=True, + net='alex', + version='0.1', + lpips=True, + spatial=False, + pnet_rand=False, + pnet_tune=False, + use_dropout=True, + model_path=None, + eval_mode=True, + verbose=True): + """ Initializes a perceptual loss torch.nn.Module + + Parameters (default listed first) + --------------------------------- + lpips : bool + [True] use linear layers on top of base/trunk network + [False] means no linear layers; each layer is averaged together + pretrained : bool + This flag controls the linear layers, which are only in effect when lpips=True above + [True] means linear layers are calibrated with human perceptual judgments + [False] means linear layers are randomly initialized + pnet_rand : bool + [False] means trunk loaded with ImageNet classification weights + [True] means randomly initialized trunk + net : str + ['alex','vgg','squeeze'] are the base/trunk networks available + version : str + ['v0.1'] is the default and latest + ['v0.0'] contained a normalization bug; corresponds to old arxiv v1 (https://arxiv.org/abs/1801.03924v1) + model_path : 'str' + [None] is default and loads the pretrained weights from paper https://arxiv.org/abs/1801.03924v1 + + The following parameters should only be changed if training the network: + + eval_mode : bool + [True] is for test mode (default) + [False] is for training mode + pnet_tune + [False] keep base/trunk frozen + [True] tune the base/trunk network + use_dropout : bool + [True] to use dropout when training linear layers + [False] for no dropout when training linear layers + """ + super(LPIPS, self).__init__() + if verbose: + print('Setting up [%s] perceptual loss: trunk [%s], v[%s], spatial [%s]' % + ('LPIPS' if lpips else 'baseline', net, version, 'on' if spatial else 'off')) + + self.pnet_type = net + self.pnet_tune = pnet_tune + self.pnet_rand = pnet_rand + self.spatial = spatial + self.lpips = lpips # false means baseline of just averaging all layers + self.version = version + self.scaling_layer = ScalingLayer() + + if self.pnet_type in ['vgg', 'vgg16']: + net_type = pretrained_torch_models.vgg16 + self.chns = [64, 128, 256, 512, 512] + elif self.pnet_type == 'alex': + net_type = pretrained_torch_models.alexnet + self.chns = [64, 192, 384, 256, 256] + elif self.pnet_type == 'squeeze': + net_type = pretrained_torch_models.squeezenet + self.chns = [64, 128, 256, 384, 384, 512, 512] + self.L = len(self.chns) + + self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune) + + if lpips: + self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) + self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) + self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) + self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) + self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) + self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4] + if self.pnet_type == 'squeeze': # 7 layers for squeezenet + self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout) + self.lin6 = NetLinLayer(self.chns[6], 
use_dropout=use_dropout) + self.lins += [self.lin5, self.lin6] + self.lins = nn.ModuleList(self.lins) + + if pretrained: + if model_path is None: + model_path = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + f"weights/v{version}/{net}.pth" + ) + if verbose: + print('Loading model from: %s' % model_path) + self.load_state_dict(torch.load(model_path, map_location='cpu'), strict=False) + + if eval_mode: + self.eval() + + def forward(self, in0, in1, return_per_layer=False, normalize=False): + if normalize: # turn on this flag if input is [0,1] so it can be adjusted to [-1, 1] + in0 = 2 * in0 - 1 + in1 = 2 * in1 - 1 + + # Noting: v0.0 - original release had a bug, where input was not scaled + if self.version == '0.1': + in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) + else: + in0_input, in1_input = in0, in1 + + # model forward + outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input) + + feats0, feats1, diffs = {}, {}, {} + for kk in range(self.L): + feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk]) + diffs[kk] = (feats0[kk] - feats1[kk]) ** 2 + + if self.lpips: + if self.spatial: + res = [upsample(self.lins[kk](diffs[kk])) for kk in range(self.L)] + else: + res = [spatial_average(self.lins[kk](diffs[kk]), keepdim=True) for kk in range(self.L)] + else: + if self.spatial: + res = [upsample(diffs[kk].sum(dim=1, keepdim=True)) for kk in range(self.L)] + else: + res = [spatial_average(diffs[kk].sum(dim=1, keepdim=True), keepdim=True) for kk in range(self.L)] + + loss = sum(res) + + if return_per_layer: + return loss, res + else: + return loss + + +class ScalingLayer(nn.Module): + def __init__(self): + super(ScalingLayer, self).__init__() + self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) + self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None]) + + def forward(self, inp): + return (inp - self.shift) / self.scale + + +class NetLinLayer(nn.Module): + """A single linear layer which does a 1x1 conv""" + + def __init__(self, chn_in, chn_out=1, use_dropout=False): + super(NetLinLayer, self).__init__() + + layers = [nn.Dropout(), ] if (use_dropout) else [] + layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ] + self.model = nn.Sequential(*layers) + + def forward(self, x): + return self.model(x) diff --git a/pytorch_svgrender/libs/metric/lpips_origin/pretrained_networks.py b/pytorch_svgrender/libs/metric/lpips_origin/pretrained_networks.py new file mode 100644 index 0000000000000000000000000000000000000000..484b808da02eecb59c132e63a0fe4ae90b1e4d2e --- /dev/null +++ b/pytorch_svgrender/libs/metric/lpips_origin/pretrained_networks.py @@ -0,0 +1,196 @@ +from collections import namedtuple + +import torch +import torchvision.models as tv_models + + +class squeezenet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(squeezenet, self).__init__() + pretrained_features = tv_models.squeezenet1_1(weights=pretrained).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.slice6 = torch.nn.Sequential() + self.slice7 = torch.nn.Sequential() + self.N_slices = 7 + for x in range(2): + self.slice1.add_module(str(x), pretrained_features[x]) + for x in range(2, 5): + self.slice2.add_module(str(x), pretrained_features[x]) + for x in range(5, 8): + 
self.slice3.add_module(str(x), pretrained_features[x]) + for x in range(8, 10): + self.slice4.add_module(str(x), pretrained_features[x]) + for x in range(10, 11): + self.slice5.add_module(str(x), pretrained_features[x]) + for x in range(11, 12): + self.slice6.add_module(str(x), pretrained_features[x]) + for x in range(12, 13): + self.slice7.add_module(str(x), pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1 = h + h = self.slice2(h) + h_relu2 = h + h = self.slice3(h) + h_relu3 = h + h = self.slice4(h) + h_relu4 = h + h = self.slice5(h) + h_relu5 = h + h = self.slice6(h) + h_relu6 = h + h = self.slice7(h) + h_relu7 = h + vgg_outputs = namedtuple("SqueezeOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5', 'relu6', 'relu7']) + out = vgg_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7) + + return out + + +class alexnet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(alexnet, self).__init__() + weights = tv_models.AlexNet_Weights.IMAGENET1K_V1 if pretrained else None + alexnet_pretrained_features = tv_models.alexnet(weights=weights).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.N_slices = 5 + for x in range(2): + self.slice1.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(2, 5): + self.slice2.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(5, 8): + self.slice3.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(8, 10): + self.slice4.add_module(str(x), alexnet_pretrained_features[x]) + for x in range(10, 12): + self.slice5.add_module(str(x), alexnet_pretrained_features[x]) + + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1 = h + h = self.slice2(h) + h_relu2 = h + h = self.slice3(h) + h_relu3 = h + h = self.slice4(h) + h_relu4 = h + h = self.slice5(h) + h_relu5 = h + alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5']) + out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5) + + return out + + +class vgg16(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True): + super(vgg16, self).__init__() + weights = tv_models.VGG16_Weights.IMAGENET1K_V1 if pretrained else None + vgg_pretrained_features = tv_models.vgg16(weights=weights).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + self.N_slices = 5 + for x in range(4): + self.slice1.add_module(str(x), vgg_pretrained_features[x]) + for x in range(4, 9): + self.slice2.add_module(str(x), vgg_pretrained_features[x]) + for x in range(9, 16): + self.slice3.add_module(str(x), vgg_pretrained_features[x]) + for x in range(16, 23): + self.slice4.add_module(str(x), vgg_pretrained_features[x]) + for x in range(23, 30): + self.slice5.add_module(str(x), vgg_pretrained_features[x]) + + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h = self.slice1(X) + h_relu1_2 = h + h = self.slice2(h) + h_relu2_2 = h + h = self.slice3(h) + h_relu3_3 = h + h = self.slice4(h) + h_relu4_3 = h + h = self.slice5(h) 
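+        # h now holds the relu5_3 activation; the five slice outputs (relu1_2 ... relu5_3)
+        # are the VGG16 feature maps that LPIPS compares between its two inputs.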
+ h_relu5_3 = h + vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) + out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) + + return out + + +class resnet(torch.nn.Module): + def __init__(self, requires_grad=False, pretrained=True, num=18): + super(resnet, self).__init__() + + if num == 18: + weights = tv_models.ResNet18_Weights.IMAGENET1K_V1 if pretrained else None + self.net = tv_models.resnet18(weights=weights) + elif num == 34: + weights = tv_models.ResNet34_Weights.IMAGENET1K_V1 if pretrained else None + self.net = tv_models.resnet34(weights=weights) + elif num == 50: + weights = tv_models.ResNet50_Weights.IMAGENET1K_V2 if pretrained else None + self.net = tv_models.resnet50(weights=weights) + elif num == 101: + weights = tv_models.ResNet101_Weights.IMAGENET1K_V2 if pretrained else None + self.net = tv_models.resnet101(weights=weights) + elif num == 152: + weights = tv_models.ResNet152_Weights.IMAGENET1K_V2 if pretrained else None + self.net = tv_models.resnet152(weights=weights) + self.N_slices = 5 + + if not requires_grad: + for param in self.net.parameters(): + param.requires_grad = False + + self.conv1 = self.net.conv1 + self.bn1 = self.net.bn1 + self.relu = self.net.relu + self.maxpool = self.net.maxpool + self.layer1 = self.net.layer1 + self.layer2 = self.net.layer2 + self.layer3 = self.net.layer3 + self.layer4 = self.net.layer4 + + def forward(self, X): + h = self.conv1(X) + h = self.bn1(h) + h = self.relu(h) + h_relu1 = h + h = self.maxpool(h) + h = self.layer1(h) + h_conv2 = h + h = self.layer2(h) + h_conv3 = h + h = self.layer3(h) + h_conv4 = h + h = self.layer4(h) + h_conv5 = h + + outputs = namedtuple("Outputs", ['relu1', 'conv2', 'conv3', 'conv4', 'conv5']) + out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5) + + return out diff --git a/pytorch_svgrender/libs/metric/lpips_origin/weights/v0.1/alex.pth b/pytorch_svgrender/libs/metric/lpips_origin/weights/v0.1/alex.pth new file mode 100644 index 0000000000000000000000000000000000000000..fa4067abc5d4da16a7204fd94776506e4868030e --- /dev/null +++ b/pytorch_svgrender/libs/metric/lpips_origin/weights/v0.1/alex.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df73285e35b22355a2df87cdb6b70b343713b667eddbda73e1977e0c860835c0 +size 6009 diff --git a/pytorch_svgrender/libs/metric/lpips_origin/weights/v0.1/squeeze.pth b/pytorch_svgrender/libs/metric/lpips_origin/weights/v0.1/squeeze.pth new file mode 100644 index 0000000000000000000000000000000000000000..f892a84a130828b1c9e2e8156e84fc5a962c665d --- /dev/null +++ b/pytorch_svgrender/libs/metric/lpips_origin/weights/v0.1/squeeze.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a5350f23600cb79923ce65bb07cbf57dca461329894153e05a1346bd531cf76 +size 10811 diff --git a/pytorch_svgrender/libs/metric/lpips_origin/weights/v0.1/vgg.pth b/pytorch_svgrender/libs/metric/lpips_origin/weights/v0.1/vgg.pth new file mode 100644 index 0000000000000000000000000000000000000000..f57dcf5cc764d61c8a460365847fb2137ff0a62d --- /dev/null +++ b/pytorch_svgrender/libs/metric/lpips_origin/weights/v0.1/vgg.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a78928a0af1e5f0fcb1f3b9e8f8c3a2a5a3de244d830ad5c1feddc79b8432868 +size 7289 diff --git a/pytorch_svgrender/libs/metric/piq/__init__.py b/pytorch_svgrender/libs/metric/piq/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..54f5e8e115957584d775c4ff25aec65a4a17085e --- /dev/null +++ 
b/pytorch_svgrender/libs/metric/piq/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +# install: pip install piq +# repo: https://github.com/photosynthesis-team/piq diff --git a/pytorch_svgrender/libs/metric/piq/functional/__init__.py b/pytorch_svgrender/libs/metric/piq/functional/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..131231bd3249723d3c21d98c45109ff99fb19612 --- /dev/null +++ b/pytorch_svgrender/libs/metric/piq/functional/__init__.py @@ -0,0 +1,15 @@ +from .base import ifftshift, get_meshgrid, similarity_map, gradient_map, pow_for_complex, crop_patches +from .colour_conversion import rgb2lmn, rgb2xyz, xyz2lab, rgb2lab, rgb2yiq, rgb2lhm +from .filters import haar_filter, hann_filter, scharr_filter, prewitt_filter, gaussian_filter +from .filters import binomial_filter1d, average_filter2d +from .layers import L2Pool2d +from .resize import imresize + +__all__ = [ + 'ifftshift', 'get_meshgrid', 'similarity_map', 'gradient_map', 'pow_for_complex', 'crop_patches', + 'rgb2lmn', 'rgb2xyz', 'xyz2lab', 'rgb2lab', 'rgb2yiq', 'rgb2lhm', + 'haar_filter', 'hann_filter', 'scharr_filter', 'prewitt_filter', 'gaussian_filter', + 'binomial_filter1d', 'average_filter2d', + 'L2Pool2d', + 'imresize', +] diff --git a/pytorch_svgrender/libs/metric/piq/functional/base.py b/pytorch_svgrender/libs/metric/piq/functional/base.py new file mode 100644 index 0000000000000000000000000000000000000000..a8d34790ad89b225fd28ade34507c498a315fb6d --- /dev/null +++ b/pytorch_svgrender/libs/metric/piq/functional/base.py @@ -0,0 +1,111 @@ +r"""General purpose functions""" +from typing import Tuple, Union, Optional +import torch +from ..utils import _parse_version + + +def ifftshift(x: torch.Tensor) -> torch.Tensor: + r""" Similar to np.fft.ifftshift but applies to PyTorch Tensors""" + shift = [-(ax // 2) for ax in x.size()] + return torch.roll(x, shift, tuple(range(len(shift)))) + + +def get_meshgrid(size: Tuple[int, int], device: Optional[str] = None, dtype: Optional[type] = None) -> torch.Tensor: + r"""Return coordinate grid matrices centered at zero point. + Args: + size: Shape of meshgrid to create + device: device to use for creation + dtype: dtype to use for creation + Returns: + Meshgrid of size on device with dtype values. + """ + if size[0] % 2: + # Odd + x = torch.arange(-(size[0] - 1) / 2, size[0] / 2, device=device, dtype=dtype) / (size[0] - 1) + else: + # Even + x = torch.arange(- size[0] / 2, size[0] / 2, device=device, dtype=dtype) / size[0] + + if size[1] % 2: + # Odd + y = torch.arange(-(size[1] - 1) / 2, size[1] / 2, device=device, dtype=dtype) / (size[1] - 1) + else: + # Even + y = torch.arange(- size[1] / 2, size[1] / 2, device=device, dtype=dtype) / size[1] + # Use indexing param depending on torch version + recommended_torch_version = _parse_version("1.10.0") + torch_version = _parse_version(torch.__version__) + if len(torch_version) > 0 and torch_version >= recommended_torch_version: + return torch.meshgrid(x, y, indexing='ij') + return torch.meshgrid(x, y) + + +def similarity_map(map_x: torch.Tensor, map_y: torch.Tensor, constant: float, alpha: float = 0.0) -> torch.Tensor: + r""" Compute similarity_map between two tensors using Dice-like equation. + + Args: + map_x: Tensor with map to be compared + map_y: Tensor with map to be compared + constant: Used for numerical stability + alpha: Masking coefficient. 
Subtracts - `alpha` * map_x * map_y from denominator and nominator + """ + return (2.0 * map_x * map_y - alpha * map_x * map_y + constant) / \ + (map_x ** 2 + map_y ** 2 - alpha * map_x * map_y + constant) + + +def gradient_map(x: torch.Tensor, kernels: torch.Tensor) -> torch.Tensor: + r""" Compute gradient map for a given tensor and stack of kernels. + + Args: + x: Tensor with shape (N, C, H, W). + kernels: Stack of tensors for gradient computation with shape (k_N, k_H, k_W) + Returns: + Gradients of x per-channel with shape (N, C, H, W) + """ + padding = kernels.size(-1) // 2 + grads = torch.nn.functional.conv2d(x, kernels, padding=padding) + + return torch.sqrt(torch.sum(grads ** 2, dim=-3, keepdim=True)) + + +def pow_for_complex(base: torch.Tensor, exp: Union[int, float]) -> torch.Tensor: + r""" Takes the power of each element in a 4D tensor with negative values or 5D tensor with complex values. + Complex numbers are represented by modulus and argument: r * \exp(i * \phi). + + It will likely to be redundant with introduction of torch.ComplexTensor. + + Args: + base: Tensor with shape (N, C, H, W) or (N, C, H, W, 2). + exp: Exponent + Returns: + Complex tensor with shape (N, C, H, W, 2). + """ + if base.dim() == 4: + x_complex_r = base.abs() + x_complex_phi = torch.atan2(torch.zeros_like(base), base) + elif base.dim() == 5 and base.size(-1) == 2: + x_complex_r = base.pow(2).sum(dim=-1).sqrt() + x_complex_phi = torch.atan2(base[..., 1], base[..., 0]) + else: + raise ValueError(f'Expected real or complex tensor, got {base.size()}') + + x_complex_pow_r = x_complex_r ** exp + x_complex_pow_phi = x_complex_phi * exp + x_real_pow = x_complex_pow_r * torch.cos(x_complex_pow_phi) + x_imag_pow = x_complex_pow_r * torch.sin(x_complex_pow_phi) + return torch.stack((x_real_pow, x_imag_pow), dim=-1) + + +def crop_patches(x: torch.Tensor, size=64, stride=32) -> torch.Tensor: + r"""Crop tensor with images into small patches + Args: + x: Tensor with shape (N, C, H, W), expected to be images-like entities + size: Size of a square patch + stride: Step between patches + """ + assert (x.shape[2] >= size) and (x.shape[3] >= size), \ + f"Images must be bigger than patch size. Got ({x.shape[2], x.shape[3]}) and ({size}, {size})" + channels = x.shape[1] + patches = x.unfold(1, channels, channels).unfold(2, size, stride).unfold(3, size, stride) + patches = patches.reshape(-1, channels, size, size) + return patches diff --git a/pytorch_svgrender/libs/metric/piq/functional/colour_conversion.py b/pytorch_svgrender/libs/metric/piq/functional/colour_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..9de6eb031a60aa765a326cb6ef8cf67c37177d97 --- /dev/null +++ b/pytorch_svgrender/libs/metric/piq/functional/colour_conversion.py @@ -0,0 +1,136 @@ +r"""Colour space conversion functions""" +from typing import Union, Dict +import torch + + +def rgb2lmn(x: torch.Tensor) -> torch.Tensor: + r"""Convert a batch of RGB images to a batch of LMN images + + Args: + x: Batch of images with shape (N, 3, H, W). RGB colour space. + + Returns: + Batch of images with shape (N, 3, H, W). LMN colour space. 
+ """ + weights_rgb_to_lmn = torch.tensor([[0.06, 0.63, 0.27], + [0.30, 0.04, -0.35], + [0.34, -0.6, 0.17]], dtype=x.dtype, device=x.device).t() + x_lmn = torch.matmul(x.permute(0, 2, 3, 1), weights_rgb_to_lmn).permute(0, 3, 1, 2) + return x_lmn + + +def rgb2xyz(x: torch.Tensor) -> torch.Tensor: + r"""Convert a batch of RGB images to a batch of XYZ images + + Args: + x: Batch of images with shape (N, 3, H, W). RGB colour space. + + Returns: + Batch of images with shape (N, 3, H, W). XYZ colour space. + """ + mask_below = (x <= 0.04045).type(x.dtype) + mask_above = (x > 0.04045).type(x.dtype) + + tmp = x / 12.92 * mask_below + torch.pow((x + 0.055) / 1.055, 2.4) * mask_above + + weights_rgb_to_xyz = torch.tensor([[0.4124564, 0.3575761, 0.1804375], + [0.2126729, 0.7151522, 0.0721750], + [0.0193339, 0.1191920, 0.9503041]], dtype=x.dtype, device=x.device) + + x_xyz = torch.matmul(tmp.permute(0, 2, 3, 1), weights_rgb_to_xyz.t()).permute(0, 3, 1, 2) + return x_xyz + + +def xyz2lab(x: torch.Tensor, illuminant: str = 'D50', observer: str = '2') -> torch.Tensor: + r"""Convert a batch of XYZ images to a batch of LAB images + + Args: + x: Batch of images with shape (N, 3, H, W). XYZ colour space. + illuminant: {“A”, “D50”, “D55”, “D65”, “D75”, “E”}, optional. The name of the illuminant. + observer: {“2”, “10”}, optional. The aperture angle of the observer. + + Returns: + Batch of images with shape (N, 3, H, W). LAB colour space. + """ + epsilon = 0.008856 + kappa = 903.3 + illuminants: Dict[str, Dict] = \ + {"A": {'2': (1.098466069456375, 1, 0.3558228003436005), + '10': (1.111420406956693, 1, 0.3519978321919493)}, + "D50": {'2': (0.9642119944211994, 1, 0.8251882845188288), + '10': (0.9672062750333777, 1, 0.8142801513128616)}, + "D55": {'2': (0.956797052643698, 1, 0.9214805860173273), + '10': (0.9579665682254781, 1, 0.9092525159847462)}, + "D65": {'2': (0.95047, 1., 1.08883), # This was: `lab_ref_white` + '10': (0.94809667673716, 1, 1.0730513595166162)}, + "D75": {'2': (0.9497220898840717, 1, 1.226393520724154), + '10': (0.9441713925645873, 1, 1.2064272211720228)}, + "E": {'2': (1.0, 1.0, 1.0), + '10': (1.0, 1.0, 1.0)}} + + illuminants_to_use = torch.tensor(illuminants[illuminant][observer], + dtype=x.dtype, device=x.device).view(1, 3, 1, 1) + + tmp = x / illuminants_to_use + + mask_below = (tmp <= epsilon).type(x.dtype) + mask_above = (tmp > epsilon).type(x.dtype) + tmp = torch.pow(tmp, 1. / 3.) * mask_above + (kappa * tmp + 16.) / 116. * mask_below + + weights_xyz_to_lab = torch.tensor([[0, 116., 0], + [500., -500., 0], + [0, 200., -200.]], dtype=x.dtype, device=x.device) + bias_xyz_to_lab = torch.tensor([-16., 0., 0.], dtype=x.dtype, device=x.device).view(1, 3, 1, 1) + + x_lab = torch.matmul(tmp.permute(0, 2, 3, 1), weights_xyz_to_lab.t()).permute(0, 3, 1, 2) + bias_xyz_to_lab + return x_lab + + +def rgb2lab(x: torch.Tensor, data_range: Union[int, float] = 255) -> torch.Tensor: + r"""Convert a batch of RGB images to a batch of LAB images + + Args: + x: Batch of images with shape (N, 3, H, W). RGB colour space. + data_range: dynamic range of the input image. + + Returns: + Batch of images with shape (N, 3, H, W). LAB colour space. + """ + return xyz2lab(rgb2xyz(x / float(data_range))) + + +def rgb2yiq(x: torch.Tensor) -> torch.Tensor: + r"""Convert a batch of RGB images to a batch of YIQ images + + Args: + x: Batch of images with shape (N, 3, H, W). RGB colour space. + + Returns: + Batch of images with shape (N, 3, H, W). YIQ colour space. 
+ """ + yiq_weights = torch.tensor([ + [0.299, 0.587, 0.114], + [0.5959, -0.2746, -0.3213], + [0.2115, -0.5227, 0.3112]], dtype=x.dtype, device=x.device).t() + x_yiq = torch.matmul(x.permute(0, 2, 3, 1), yiq_weights).permute(0, 3, 1, 2) + return x_yiq + + +def rgb2lhm(x: torch.Tensor) -> torch.Tensor: + r"""Convert a batch of RGB images to a batch of LHM images + + Args: + x: Batch of images with shape (N, 3, H, W). RGB colour space. + + Returns: + Batch of images with shape (N, 3, H, W). LHM colour space. + + Reference: + https://arxiv.org/pdf/1608.07433.pdf + """ + lhm_weights = torch.tensor([ + [0.2989, 0.587, 0.114], + [0.3, 0.04, -0.35], + [0.34, -0.6, 0.17]], dtype=x.dtype, device=x.device).t() + x_lhm = torch.matmul(x.permute(0, 2, 3, 1), lhm_weights).permute(0, 3, 1, 2) + return x_lhm diff --git a/pytorch_svgrender/libs/metric/piq/functional/filters.py b/pytorch_svgrender/libs/metric/piq/functional/filters.py new file mode 100644 index 0000000000000000000000000000000000000000..ff5ef1ac5110fa57b75de7476567a409842c0dfc --- /dev/null +++ b/pytorch_svgrender/libs/metric/piq/functional/filters.py @@ -0,0 +1,111 @@ +r"""Filters for gradient computation, bluring, etc.""" +import torch +import numpy as np +from typing import Optional + + +def haar_filter(kernel_size: int, device: Optional[str] = None, dtype: Optional[type] = None) -> torch.Tensor: + r"""Creates Haar kernel + + Args: + kernel_size: size of the kernel + device: target device for kernel generation + dtype: target data type for kernel generation + Returns: + kernel: Tensor with shape (1, kernel_size, kernel_size) + """ + kernel = torch.ones((kernel_size, kernel_size), device=device, dtype=dtype) / kernel_size + kernel[kernel_size // 2:, :] = - kernel[kernel_size // 2:, :] + return kernel.unsqueeze(0) + + +def hann_filter(kernel_size: int, device: Optional[str] = None, dtype: Optional[type] = None) -> torch.Tensor: + r"""Creates Hann kernel + Args: + kernel_size: size of the kernel + device: target device for kernel generation + dtype: target data type for kernel generation + Returns: + kernel: Tensor with shape (1, kernel_size, kernel_size) + """ + # Take bigger window and drop borders + window = torch.hann_window(kernel_size + 2, periodic=False, device=device, dtype=dtype)[1:-1] + kernel = window[:, None] * window[None, :] + # Normalize and reshape kernel + return kernel.view(1, kernel_size, kernel_size) / kernel.sum() + + +def gaussian_filter(kernel_size: int, sigma: float, device: Optional[str] = None, + dtype: Optional[type] = None) -> torch.Tensor: + r"""Returns 2D Gaussian kernel N(0,`sigma`^2) + Args: + size: Size of the kernel + sigma: Std of the distribution + device: target device for kernel generation + dtype: target data type for kernel generation + Returns: + gaussian_kernel: Tensor with shape (1, kernel_size, kernel_size) + """ + coords = torch.arange(kernel_size, dtype=dtype, device=device) + coords -= (kernel_size - 1) / 2. 
+ + g = coords ** 2 + g = (- (g.unsqueeze(0) + g.unsqueeze(1)) / (2 * sigma ** 2)).exp() + + g /= g.sum() + return g.unsqueeze(0) + + +# Gradient operator kernels +def scharr_filter(device: Optional[str] = None, dtype: Optional[type] = None) -> torch.Tensor: + r"""Utility function that returns a normalized 3x3 Scharr kernel in X direction + + Args: + device: target device for kernel generation + dtype: target data type for kernel generation + Returns: + kernel: Tensor with shape (1, 3, 3) + """ + return torch.tensor([[[-3., 0., 3.], [-10., 0., 10.], [-3., 0., 3.]]], device=device, dtype=dtype) / 16 + + +def prewitt_filter(device: Optional[str] = None, dtype: Optional[type] = None) -> torch.Tensor: + r"""Utility function that returns a normalized 3x3 Prewitt kernel in X direction + + Args: + device: target device for kernel generation + dtype: target data type for kernel generation + Returns: + kernel: Tensor with shape (1, 3, 3)""" + return torch.tensor([[[-1., 0., 1.], [-1., 0., 1.], [-1., 0., 1.]]], device=device, dtype=dtype) / 3 + + +def binomial_filter1d(kernel_size: int, device: Optional[str] = None, dtype: Optional[type] = None) -> torch.Tensor: + r"""Creates 1D normalized binomial filter + + Args: + kernel_size (int): kernel size + device: target device for kernel generation + dtype: target data type for kernel generation + + Returns: + Binomial kernel with shape (1, 1, kernel_size) + """ + kernel = np.poly1d([0.5, 0.5]) ** (kernel_size - 1) + return torch.tensor(kernel.c, dtype=dtype, device=device).view(1, 1, kernel_size) + + +def average_filter2d(kernel_size: int, device: Optional[str] = None, dtype: Optional[type] = None) -> torch.Tensor: + r"""Creates 2D normalized average filter + + Args: + kernel_size (int): kernel size + device: target device for kernel generation + dtype: target data type for kernel generation + + Returns: + kernel: Tensor with shape (1, kernel_size, kernel_size) + """ + window = torch.ones(kernel_size, dtype=dtype, device=device) / kernel_size + kernel = window[:, None] * window[None, :] + return kernel.unsqueeze(0) diff --git a/pytorch_svgrender/libs/metric/piq/functional/layers.py b/pytorch_svgrender/libs/metric/piq/functional/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..3f0701dbc5acfc47e97aba32c0e04aa80c5bc8fc --- /dev/null +++ b/pytorch_svgrender/libs/metric/piq/functional/layers.py @@ -0,0 +1,33 @@ +r"""Custom layers used in metrics computations""" +import torch +from typing import Optional + +from .filters import hann_filter + + +class L2Pool2d(torch.nn.Module): + r"""Applies L2 pooling with Hann window of size 3x3 + Args: + x: Tensor with shape (N, C, H, W)""" + EPS = 1e-12 + + def __init__(self, kernel_size: int = 3, stride: int = 2, padding=1) -> None: + super().__init__() + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + + self.kernel: Optional[torch.Tensor] = None + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.kernel is None: + C = x.size(1) + self.kernel = hann_filter(self.kernel_size).repeat((C, 1, 1, 1)).to(x) + + out = torch.nn.functional.conv2d( + x ** 2, self.kernel, + stride=self.stride, + padding=self.padding, + groups=x.shape[1] + ) + return (out + self.EPS).sqrt() diff --git a/pytorch_svgrender/libs/metric/piq/functional/resize.py b/pytorch_svgrender/libs/metric/piq/functional/resize.py new file mode 100644 index 0000000000000000000000000000000000000000..b3a39b45a10a71cc38ac07a89929cf2fad033239 --- /dev/null +++ 
b/pytorch_svgrender/libs/metric/piq/functional/resize.py @@ -0,0 +1,426 @@ +""" +A standalone PyTorch implementation for fast and efficient bicubic resampling. +The resulting values are the same to MATLAB function imresize('bicubic'). +## Author: Sanghyun Son +## Email: sonsang35@gmail.com (primary), thstkdgus35@snu.ac.kr (secondary) +## Version: 1.2.0 +## Last update: July 9th, 2020 (KST) +Dependency: torch +Example:: +>>> import torch +>>> import core +>>> x = torch.arange(16).float().view(1, 1, 4, 4) +>>> y = core.imresize(x, sizes=(3, 3)) +>>> print(y) +tensor([[[[ 0.7506, 2.1004, 3.4503], + [ 6.1505, 7.5000, 8.8499], + [11.5497, 12.8996, 14.2494]]]]) +""" + +import math +import typing + +import torch +from torch.nn import functional as F + +__all__ = ['imresize'] + +_I = typing.Optional[int] +_D = typing.Optional[torch.dtype] + + +def nearest_contribution(x: torch.Tensor) -> torch.Tensor: + range_around_0 = torch.logical_and(x.gt(-0.5), x.le(0.5)) + cont = range_around_0.to(dtype=x.dtype) + return cont + + +def linear_contribution(x: torch.Tensor) -> torch.Tensor: + ax = x.abs() + range_01 = ax.le(1) + cont = (1 - ax) * range_01.to(dtype=x.dtype) + return cont + + +def cubic_contribution(x: torch.Tensor, a: float = -0.5) -> torch.Tensor: + ax = x.abs() + ax2 = ax * ax + ax3 = ax * ax2 + + range_01 = ax.le(1) + range_12 = torch.logical_and(ax.gt(1), ax.le(2)) + + cont_01 = (a + 2) * ax3 - (a + 3) * ax2 + 1 + cont_01 = cont_01 * range_01.to(dtype=x.dtype) + + cont_12 = (a * ax3) - (5 * a * ax2) + (8 * a * ax) - (4 * a) + cont_12 = cont_12 * range_12.to(dtype=x.dtype) + + cont = cont_01 + cont_12 + return cont + + +def gaussian_contribution(x: torch.Tensor, sigma: float = 2.0) -> torch.Tensor: + range_3sigma = (x.abs() <= 3 * sigma + 1) + # Normalization will be done after + cont = torch.exp(-x.pow(2) / (2 * sigma ** 2)) + cont = cont * range_3sigma.to(dtype=x.dtype) + return cont + + +def discrete_kernel( + kernel: str, scale: float, antialiasing: bool = True) -> torch.Tensor: + ''' + For downsampling with integer scale only. + ''' + downsampling_factor = int(1 / scale) + if kernel == 'cubic': + kernel_size_orig = 4 + else: + raise ValueError('Pass!') + + if antialiasing: + kernel_size = kernel_size_orig * downsampling_factor + else: + kernel_size = kernel_size_orig + + if downsampling_factor % 2 == 0: + a = kernel_size_orig * (0.5 - 1 / (2 * kernel_size)) + else: + kernel_size -= 1 + a = kernel_size_orig * (0.5 - 1 / (kernel_size + 1)) + + with torch.no_grad(): + r = torch.linspace(-a, a, steps=kernel_size) + k = cubic_contribution(r).view(-1, 1) + k = torch.matmul(k, k.t()) + k /= k.sum() + + return k + + +def reflect_padding( + x: torch.Tensor, + dim: int, + pad_pre: int, + pad_post: int) -> torch.Tensor: + ''' + Apply reflect padding to the given Tensor. + Note that it is slightly different from the PyTorch functional.pad, + where boundary elements are used only once. + Instead, we follow the MATLAB implementation + which uses boundary elements twice. + For example, + [a, b, c, d] would become [b, a, b, c, d, c] with the PyTorch implementation, + while our implementation yields [a, a, b, c, d, d]. 
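+    (Both examples assume one element of padding on each side, i.e. pad_pre = pad_post = 1.)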
+ ''' + b, c, h, w = x.size() + if dim == 2 or dim == -2: + padding_buffer = x.new_zeros(b, c, h + pad_pre + pad_post, w) + padding_buffer[..., pad_pre:(h + pad_pre), :].copy_(x) + for p in range(pad_pre): + padding_buffer[..., pad_pre - p - 1, :].copy_(x[..., p, :]) + for p in range(pad_post): + padding_buffer[..., h + pad_pre + p, :].copy_(x[..., -(p + 1), :]) + else: + padding_buffer = x.new_zeros(b, c, h, w + pad_pre + pad_post) + padding_buffer[..., pad_pre:(w + pad_pre)].copy_(x) + for p in range(pad_pre): + padding_buffer[..., pad_pre - p - 1].copy_(x[..., p]) + for p in range(pad_post): + padding_buffer[..., w + pad_pre + p].copy_(x[..., -(p + 1)]) + + return padding_buffer + + +def padding( + x: torch.Tensor, + dim: int, + pad_pre: int, + pad_post: int, + padding_type: typing.Optional[str] = 'reflect') -> torch.Tensor: + if padding_type is None: + return x + elif padding_type == 'reflect': + x_pad = reflect_padding(x, dim, pad_pre, pad_post) + else: + raise ValueError('{} padding is not supported!'.format(padding_type)) + + return x_pad + + +def get_padding( + base: torch.Tensor, + kernel_size: int, + x_size: int) -> typing.Tuple[int, int, torch.Tensor]: + base = base.long() + r_min = base.min() + r_max = base.max() + kernel_size - 1 + + if r_min <= 0: + pad_pre = -r_min + pad_pre = pad_pre.item() + base += pad_pre + else: + pad_pre = 0 + + if r_max >= x_size: + pad_post = r_max - x_size + 1 + pad_post = pad_post.item() + else: + pad_post = 0 + + return pad_pre, pad_post, base + + +def get_weight( + dist: torch.Tensor, + kernel_size: int, + kernel: str = 'cubic', + sigma: float = 2.0, + antialiasing_factor: float = 1) -> torch.Tensor: + buffer_pos = dist.new_zeros(kernel_size, len(dist)) + for idx, buffer_sub in enumerate(buffer_pos): + buffer_sub.copy_(dist - idx) + + # Expand (downsampling) / Shrink (upsampling) the receptive field. 
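+    # antialiasing_factor equals the scale (< 1) when downsampling with antialiasing,
+    # so shrinking the sample distances here is equivalent to widening the kernel and
+    # averaging over more input samples per output pixel.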
+ buffer_pos *= antialiasing_factor + if kernel == 'cubic': + weight = cubic_contribution(buffer_pos) + elif kernel == 'gaussian': + weight = gaussian_contribution(buffer_pos, sigma=sigma) + else: + raise ValueError('{} kernel is not supported!'.format(kernel)) + + weight /= weight.sum(dim=0, keepdim=True) + return weight + + +def reshape_tensor(x: torch.Tensor, dim: int, kernel_size: int) -> torch.Tensor: + # Resize height + if dim == 2 or dim == -2: + k = (kernel_size, 1) + h_out = x.size(-2) - kernel_size + 1 + w_out = x.size(-1) + # Resize width + else: + k = (1, kernel_size) + h_out = x.size(-2) + w_out = x.size(-1) - kernel_size + 1 + + unfold = F.unfold(x, k) + unfold = unfold.view(unfold.size(0), -1, h_out, w_out) + return unfold + + +def reshape_input(x: torch.Tensor) -> typing.Tuple[torch.Tensor, _I, _I, int, int]: + if x.dim() == 4: + b, c, h, w = x.size() + elif x.dim() == 3: + c, h, w = x.size() + b = None + elif x.dim() == 2: + h, w = x.size() + b = c = None + else: + raise ValueError('{}-dim Tensor is not supported!'.format(x.dim())) + + x = x.view(-1, 1, h, w) + return x, b, c, h, w + + +def reshape_output(x: torch.Tensor, b: _I, c: _I) -> torch.Tensor: + rh = x.size(-2) + rw = x.size(-1) + # Back to the original dimension + if b is not None: + x = x.view(b, c, rh, rw) # 4-dim + else: + if c is not None: + x = x.view(c, rh, rw) # 3-dim + else: + x = x.view(rh, rw) # 2-dim + + return x + + +def cast_input(x: torch.Tensor) -> typing.Tuple[torch.Tensor, _D]: + if x.dtype != torch.float32 or x.dtype != torch.float64: + dtype = x.dtype + x = x.float() + else: + dtype = None + + return x, dtype + + +def cast_output(x: torch.Tensor, dtype: _D) -> torch.Tensor: + if dtype is not None: + if not dtype.is_floating_point: + x = x.round() + # To prevent over/underflow when converting types + if dtype is torch.uint8: + x = x.clamp(0, 255) + + x = x.to(dtype=dtype) + + return x + + +def resize_1d( + x: torch.Tensor, + dim: int, + size: int, + scale: float, + kernel: str = 'cubic', + sigma: float = 2.0, + padding_type: str = 'reflect', + antialiasing: bool = True) -> torch.Tensor: + ''' + Args: + x (torch.Tensor): A torch.Tensor of dimension (B x C, 1, H, W). + dim (int): + scale (float): + size (int): + Return: + ''' + # Identity case + if scale == 1: + return x + + # Default bicubic kernel with antialiasing (only when downsampling) + if kernel == 'cubic': + kernel_size = 4 + else: + kernel_size = math.floor(6 * sigma) + + if antialiasing and (scale < 1): + antialiasing_factor = scale + kernel_size = math.ceil(kernel_size / antialiasing_factor) + else: + antialiasing_factor = 1 + + # We allow margin to both sizes + kernel_size += 2 + + # Weights only depend on the shape of input and output, + # so we do not calculate gradients here. 
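+    # pos = (pos + 0.5) / scale - 0.5 below maps each output pixel centre to input
+    # coordinates, following the same alignment convention as MATLAB's imresize.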
+ with torch.no_grad(): + pos = torch.linspace( + 0, size - 1, steps=size, dtype=x.dtype, device=x.device, + ) + pos = (pos + 0.5) / scale - 0.5 + base = pos.floor() - (kernel_size // 2) + 1 + dist = pos - base + weight = get_weight( + dist, + kernel_size, + kernel=kernel, + sigma=sigma, + antialiasing_factor=antialiasing_factor, + ) + pad_pre, pad_post, base = get_padding(base, kernel_size, x.size(dim)) + + # To backpropagate through x + x_pad = padding(x, dim, pad_pre, pad_post, padding_type=padding_type) + unfold = reshape_tensor(x_pad, dim, kernel_size) + # Subsampling first + if dim == 2 or dim == -2: + sample = unfold[..., base, :] + weight = weight.view(1, kernel_size, sample.size(2), 1) + else: + sample = unfold[..., base] + weight = weight.view(1, kernel_size, 1, sample.size(3)) + + # Apply the kernel + x = sample * weight + x = x.sum(dim=1, keepdim=True) + return x + + +def downsampling_2d( + x: torch.Tensor, + k: torch.Tensor, + scale: int, + padding_type: str = 'reflect') -> torch.Tensor: + c = x.size(1) + k_h = k.size(-2) + k_w = k.size(-1) + + k = k.to(dtype=x.dtype, device=x.device) + k = k.view(1, 1, k_h, k_w) + k = k.repeat(c, c, 1, 1) + e = torch.eye(c, dtype=k.dtype, device=k.device, requires_grad=False) + e = e.view(c, c, 1, 1) + k = k * e + + pad_h = (k_h - scale) // 2 + pad_w = (k_w - scale) // 2 + x = padding(x, -2, pad_h, pad_h, padding_type=padding_type) + x = padding(x, -1, pad_w, pad_w, padding_type=padding_type) + y = F.conv2d(x, k, padding=0, stride=scale) + return y + + +def imresize( + x: torch.Tensor, + scale: typing.Optional[float] = None, + sizes: typing.Optional[typing.Tuple[int, int]] = None, + kernel: typing.Union[str, torch.Tensor] = 'cubic', + sigma: float = 2, + rotation_degree: float = 0, + padding_type: str = 'reflect', + antialiasing: bool = True) -> torch.Tensor: + """ + Args: + x (torch.Tensor): + scale (float): + sizes (tuple(int, int)): + kernel (str, default='cubic'): + sigma (float, default=2): + rotation_degree (float, default=0): + padding_type (str, default='reflect'): + antialiasing (bool, default=True): + Return: + torch.Tensor: + """ + if scale is None and sizes is None: + raise ValueError('One of scale or sizes must be specified!') + if scale is not None and sizes is not None: + raise ValueError('Please specify scale or sizes to avoid conflict!') + + x, b, c, h, w = reshape_input(x) + + if sizes is None and scale is not None: + ''' + # Check if we can apply the convolution algorithm + scale_inv = 1 / scale + if isinstance(kernel, str) and scale_inv.is_integer(): + kernel = discrete_kernel(kernel, scale, antialiasing=antialiasing) + elif isinstance(kernel, torch.Tensor) and not scale_inv.is_integer(): + raise ValueError( + 'An integer downsampling factor ' + 'should be used with a predefined kernel!' 
+ ) + ''' + # Determine output size + sizes = (math.ceil(h * scale), math.ceil(w * scale)) + scales = (scale, scale) + + if scale is None and sizes is not None: + scales = (sizes[0] / h, sizes[1] / w) + + x, dtype = cast_input(x) + + if isinstance(kernel, str) and sizes is not None: + # Core resizing module + x = resize_1d(x, -2, size=sizes[0], scale=scales[0], kernel=kernel, sigma=sigma, padding_type=padding_type, + antialiasing=antialiasing) + x = resize_1d(x, -1, size=sizes[1], scale=scales[1], kernel=kernel, sigma=sigma, padding_type=padding_type, + antialiasing=antialiasing) + elif isinstance(kernel, torch.Tensor) and scale is not None: + x = downsampling_2d(x, kernel, scale=int(1 / scale)) + + x = reshape_output(x, b, c) + x = cast_output(x, dtype) + return x diff --git a/pytorch_svgrender/libs/metric/piq/perceptual.py b/pytorch_svgrender/libs/metric/piq/perceptual.py new file mode 100644 index 0000000000000000000000000000000000000000..68a704d4f21fa569bcd6d1b4ce7862b780ba8d2a --- /dev/null +++ b/pytorch_svgrender/libs/metric/piq/perceptual.py @@ -0,0 +1,496 @@ +""" +Implementation of Content loss, Style loss, LPIPS and DISTS metrics +References: + .. [1] Gatys, Leon and Ecker, Alexander and Bethge, Matthias + (2016). A Neural Algorithm of Artistic Style} + Association for Research in Vision and Ophthalmology (ARVO) + https://arxiv.org/abs/1508.06576 + .. [2] Zhang, Richard and Isola, Phillip and Efros, et al. + (2018) The Unreasonable Effectiveness of Deep Features as a Perceptual Metric + 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition + https://arxiv.org/abs/1801.03924 +""" +from typing import List, Union, Collection + +import torch +import torch.nn as nn +from torch.nn.modules.loss import _Loss +from torchvision.models import vgg16, vgg19, VGG16_Weights, VGG19_Weights + +from .utils import _validate_input, _reduce +from .functional import similarity_map, L2Pool2d + +# Map VGG names to corresponding number in torchvision layer +VGG16_LAYERS = { + "conv1_1": '0', "relu1_1": '1', + "conv1_2": '2', "relu1_2": '3', + "pool1": '4', + "conv2_1": '5', "relu2_1": '6', + "conv2_2": '7', "relu2_2": '8', + "pool2": '9', + "conv3_1": '10', "relu3_1": '11', + "conv3_2": '12', "relu3_2": '13', + "conv3_3": '14', "relu3_3": '15', + "pool3": '16', + "conv4_1": '17', "relu4_1": '18', + "conv4_2": '19', "relu4_2": '20', + "conv4_3": '21', "relu4_3": '22', + "pool4": '23', + "conv5_1": '24', "relu5_1": '25', + "conv5_2": '26', "relu5_2": '27', + "conv5_3": '28', "relu5_3": '29', + "pool5": '30', +} + +VGG19_LAYERS = { + "conv1_1": '0', "relu1_1": '1', + "conv1_2": '2', "relu1_2": '3', + "pool1": '4', + "conv2_1": '5', "relu2_1": '6', + "conv2_2": '7', "relu2_2": '8', + "pool2": '9', + "conv3_1": '10', "relu3_1": '11', + "conv3_2": '12', "relu3_2": '13', + "conv3_3": '14', "relu3_3": '15', + "conv3_4": '16', "relu3_4": '17', + "pool3": '18', + "conv4_1": '19', "relu4_1": '20', + "conv4_2": '21', "relu4_2": '22', + "conv4_3": '23', "relu4_3": '24', + "conv4_4": '25', "relu4_4": '26', + "pool4": '27', + "conv5_1": '28', "relu5_1": '29', + "conv5_2": '30', "relu5_2": '31', + "conv5_3": '32', "relu5_3": '33', + "conv5_4": '34', "relu5_4": '35', + "pool5": '36', +} + +IMAGENET_MEAN = [0.485, 0.456, 0.406] +IMAGENET_STD = [0.229, 0.224, 0.225] + +# Constant used in feature normalization to avoid zero division +EPS = 1e-10 + + +class ContentLoss(_Loss): + r"""Creates Content loss that can be used for image style transfer or as a measure for image to image tasks. 
+ Uses pretrained VGG models from torchvision. + Expects input to be in range [0, 1] or normalized with ImageNet statistics into range [-1, 1] + + Args: + feature_extractor: Model to extract features or model name: ``'vgg16'`` | ``'vgg19'``. + layers: List of strings with layer names. Default: ``'relu3_3'`` + weights: List of float weight to balance different layers + replace_pooling: Flag to replace MaxPooling layer with AveragePooling. See references for details. + distance: Method to compute distance between features: ``'mse'`` | ``'mae'``. + reduction: Specifies the reduction type: + ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'`` + mean: List of float values used for data standardization. Default: ImageNet mean. + If there is no need to normalize data, use [0., 0., 0.]. + std: List of float values used for data standardization. Default: ImageNet std. + If there is no need to normalize data, use [1., 1., 1.]. + normalize_features: If true, unit-normalize each feature in channel dimension before scaling + and computing distance. See references for details. + + Examples: + >>> loss = ContentLoss() + >>> x = torch.rand(3, 3, 256, 256, requires_grad=True) + >>> y = torch.rand(3, 3, 256, 256) + >>> output = loss(x, y) + >>> output.backward() + + References: + Gatys, Leon and Ecker, Alexander and Bethge, Matthias (2016). + A Neural Algorithm of Artistic Style + Association for Research in Vision and Ophthalmology (ARVO) + https://arxiv.org/abs/1508.06576 + + Zhang, Richard and Isola, Phillip and Efros, et al. (2018) + The Unreasonable Effectiveness of Deep Features as a Perceptual Metric + IEEE/CVF Conference on Computer Vision and Pattern Recognition + https://arxiv.org/abs/1801.03924 + """ + + def __init__(self, feature_extractor: Union[str, torch.nn.Module] = "vgg16", layers: Collection[str] = ("relu3_3",), + weights: List[Union[float, torch.Tensor]] = [1.], replace_pooling: bool = False, + distance: str = "mse", reduction: str = "mean", mean: List[float] = IMAGENET_MEAN, + std: List[float] = IMAGENET_STD, normalize_features: bool = False, + allow_layers_weights_mismatch: bool = False) -> None: + + assert allow_layers_weights_mismatch or len(layers) == len(weights), \ + f'Lengths of provided layers and weighs mismatch ({len(weights)} weights and {len(layers)} layers), ' \ + f'which will cause incorrect results. Please provide weight for each layer.' 
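+        # The one-weight-per-layer check can be bypassed deliberately: DISTS below passes
+        # allow_layers_weights_mismatch=True because it supplies per-channel weight tensors.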
+ + super().__init__() + + if callable(feature_extractor): + self.model = feature_extractor + self.layers = layers + else: + if feature_extractor == "vgg16": + # self.model = vgg16(pretrained=True, progress=False).features + self.model = vgg16(weights=VGG16_Weights.DEFAULT, progress=False).features + self.layers = [VGG16_LAYERS[l] for l in layers] + elif feature_extractor == "vgg19": + # self.model = vgg19(pretrained=True, progress=False).features + self.model = vgg19(weights=VGG19_Weights.DEFAULT, progress=False).features + self.layers = [VGG19_LAYERS[l] for l in layers] + else: + raise ValueError("Unknown feature extractor") + + if replace_pooling: + self.model = self.replace_pooling(self.model) + + # Disable gradients + for param in self.model.parameters(): + param.requires_grad_(False) + + self.distance = { + "mse": nn.MSELoss, + "mae": nn.L1Loss, + }[distance](reduction='none') + + self.weights = [torch.tensor(w) if not isinstance(w, torch.Tensor) else w for w in weights] + + mean = torch.tensor(mean) + std = torch.tensor(std) + self.mean = mean.view(1, -1, 1, 1) + self.std = std.view(1, -1, 1, 1) + + self.normalize_features = normalize_features + self.reduction = reduction + + def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + r"""Computation of Content loss between feature representations of prediction :math:`x` and + target :math:`y` tensors. + + Args: + x: An input tensor. Shape :math:`(N, C, H, W)`. + y: A target tensor. Shape :math:`(N, C, H, W)`. + + Returns: + Content loss between feature representations + """ + _validate_input([x, y], dim_range=(4, 4), data_range=(0, -1)) + + self.model.to(x) + x_features = self.get_features(x) + y_features = self.get_features(y) + + distances = self.compute_distance(x_features, y_features) + + # Scale distances, then average in spatial dimensions, then stack and sum in channels dimension + loss = torch.cat([(d * w.to(d)).mean(dim=[2, 3]) for d, w in zip(distances, self.weights)], dim=1).sum(dim=1) + + return _reduce(loss, self.reduction) + + def compute_distance(self, x_features: List[torch.Tensor], y_features: List[torch.Tensor]) -> List[torch.Tensor]: + r"""Take L2 or L1 distance between feature maps depending on ``distance``. + + Args: + x_features: Features of the input tensor. + y_features: Features of the target tensor. + + Returns: + Distance between feature maps + """ + return [self.distance(x, y) for x, y in zip(x_features, y_features)] + + def get_features(self, x: torch.Tensor) -> List[torch.Tensor]: + r""" + Args: + x: Tensor. Shape :math:`(N, C, H, W)`. + + Returns: + List of features extracted from intermediate layers + """ + # Normalize input + x = (x - self.mean.to(x)) / self.std.to(x) + + features = [] + for name, module in self.model._modules.items(): + x = module(x) + if name in self.layers: + features.append(self.normalize(x) if self.normalize_features else x) + + return features + + @staticmethod + def normalize(x: torch.Tensor) -> torch.Tensor: + r"""Normalize feature maps in channel direction to unit length. + + Args: + x: Tensor. Shape :math:`(N, C, H, W)`. 
+ + Returns: + Normalized input + """ + norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True)) + return x / (norm_factor + EPS) + + def replace_pooling(self, module: torch.nn.Module) -> torch.nn.Module: + r"""Turn All MaxPool layers into AveragePool + + Args: + module: Module to change MaxPool int AveragePool + + Returns: + Module with AveragePool instead MaxPool + + """ + module_output = module + if isinstance(module, torch.nn.MaxPool2d): + module_output = torch.nn.AvgPool2d(kernel_size=2, stride=2, padding=0) + + for name, child in module.named_children(): + module_output.add_module(name, self.replace_pooling(child)) + return module_output + + +class StyleLoss(ContentLoss): + r"""Creates Style loss that can be used for image style transfer or as a measure in + image to image tasks. Computes distance between Gram matrices of feature maps. + Uses pretrained VGG models from torchvision. + + By default expects input to be in range [0, 1], which is then normalized by ImageNet statistics into range [-1, 1]. + If no normalisation is required, change `mean` and `std` values accordingly. + + Args: + feature_extractor: Model to extract features or model name: ``'vgg16'`` | ``'vgg19'``. + layers: List of strings with layer names. Default: ``'relu3_3'`` + weights: List of float weight to balance different layers + replace_pooling: Flag to replace MaxPooling layer with AveragePooling. See references for details. + distance: Method to compute distance between features: ``'mse'`` | ``'mae'``. + reduction: Specifies the reduction type: + ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'`` + mean: List of float values used for data standardization. Default: ImageNet mean. + If there is no need to normalize data, use [0., 0., 0.]. + std: List of float values used for data standardization. Default: ImageNet std. + If there is no need to normalize data, use [1., 1., 1.]. + normalize_features: If true, unit-normalize each feature in channel dimension before scaling + and computing distance. See references for details. + + Examples: + >>> loss = StyleLoss() + >>> x = torch.rand(3, 3, 256, 256, requires_grad=True) + >>> y = torch.rand(3, 3, 256, 256) + >>> output = loss(x, y) + >>> output.backward() + + References: + Gatys, Leon and Ecker, Alexander and Bethge, Matthias (2016). + A Neural Algorithm of Artistic Style + Association for Research in Vision and Ophthalmology (ARVO) + https://arxiv.org/abs/1508.06576 + + Zhang, Richard and Isola, Phillip and Efros, et al. (2018) + The Unreasonable Effectiveness of Deep Features as a Perceptual Metric + IEEE/CVF Conference on Computer Vision and Pattern Recognition + https://arxiv.org/abs/1801.03924 + """ + + def compute_distance(self, x_features: torch.Tensor, y_features: torch.Tensor): + r"""Take L2 or L1 distance between Gram matrices of feature maps depending on ``distance``. + + Args: + x_features: Features of the input tensor. + y_features: Features of the target tensor. + + Returns: + Distance between Gram matrices + """ + x_gram = [self.gram_matrix(x) for x in x_features] + y_gram = [self.gram_matrix(x) for x in y_features] + return [self.distance(x, y) for x, y in zip(x_gram, y_gram)] + + @staticmethod + def gram_matrix(x: torch.Tensor) -> torch.Tensor: + r"""Compute Gram matrix for batch of features. + + Args: + x: Tensor. Shape :math:`(N, C, H, W)`. 
+ + Returns: + Gram matrix for given input + """ + B, C, H, W = x.size() + gram = [] + for i in range(B): + features = x[i].view(C, H * W) + + # Add fake channel dimension + gram.append(torch.mm(features, features.t()).unsqueeze(0)) + + return torch.stack(gram) + + +class LPIPS(ContentLoss): + r"""Learned Perceptual Image Patch Similarity metric. Only VGG16 learned weights are supported. + + By default expects input to be in range [0, 1], which is then normalized by ImageNet statistics into range [-1, 1]. + If no normalisation is required, change `mean` and `std` values accordingly. + + Args: + replace_pooling: Flag to replace MaxPooling layer with AveragePooling. See references for details. + distance: Method to compute distance between features: ``'mse'`` | ``'mae'``. + reduction: Specifies the reduction type: + ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'`` + mean: List of float values used for data standardization. Default: ImageNet mean. + If there is no need to normalize data, use [0., 0., 0.]. + std: List of float values used for data standardization. Default: ImageNet std. + If there is no need to normalize data, use [1., 1., 1.]. + + Examples: + >>> loss = LPIPS() + >>> x = torch.rand(3, 3, 256, 256, requires_grad=True) + >>> y = torch.rand(3, 3, 256, 256) + >>> output = loss(x, y) + >>> output.backward() + + References: + Gatys, Leon and Ecker, Alexander and Bethge, Matthias (2016). + A Neural Algorithm of Artistic Style + Association for Research in Vision and Ophthalmology (ARVO) + https://arxiv.org/abs/1508.06576 + + Zhang, Richard and Isola, Phillip and Efros, et al. (2018) + The Unreasonable Effectiveness of Deep Features as a Perceptual Metric + IEEE/CVF Conference on Computer Vision and Pattern Recognition + https://arxiv.org/abs/1801.03924 + https://github.com/richzhang/PerceptualSimilarity + """ + _weights_url = "https://github.com/photosynthesis-team/" + \ + "photosynthesis.metrics/releases/download/v0.4.0/lpips_weights.pt" + + def __init__(self, replace_pooling: bool = False, distance: str = "mse", reduction: str = "mean", + mean: List[float] = IMAGENET_MEAN, std: List[float] = IMAGENET_STD, ) -> None: + lpips_layers = ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'] + lpips_weights = torch.hub.load_state_dict_from_url(self._weights_url, progress=False) + super().__init__("vgg16", layers=lpips_layers, weights=lpips_weights, + replace_pooling=replace_pooling, distance=distance, + reduction=reduction, mean=mean, std=std, + normalize_features=True) + + +class DISTS(ContentLoss): + r"""Deep Image Structure and Texture Similarity metric. + + By default expects input to be in range [0, 1], which is then normalized by ImageNet statistics into range [-1, 1]. + If no normalisation is required, change `mean` and `std` values accordingly. + + Args: + reduction: Specifies the reduction type: + ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'`` + mean: List of float values used for data standardization. Default: ImageNet mean. + If there is no need to normalize data, use [0., 0., 0.]. + std: List of float values used for data standardization. Default: ImageNet std. + If there is no need to normalize data, use [1., 1., 1.]. + + Examples: + >>> loss = DISTS() + >>> x = torch.rand(3, 3, 256, 256, requires_grad=True) + >>> y = torch.rand(3, 3, 256, 256) + >>> output = loss(x, y) + >>> output.backward() + + References: + Keyan Ding, Kede Ma, Shiqi Wang, Eero P. Simoncelli (2020). + Image Quality Assessment: Unifying Structure and Texture Similarity. 
+ https://arxiv.org/abs/2004.07728 + https://github.com/dingkeyan93/DISTS + """ + _weights_url = "https://github.com/photosynthesis-team/piq/releases/download/v0.4.1/dists_weights.pt" + + def __init__(self, reduction: str = "mean", mean: List[float] = IMAGENET_MEAN, + std: List[float] = IMAGENET_STD) -> None: + dists_layers = ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'] + channels = [3, 64, 128, 256, 512, 512] + + weights = torch.hub.load_state_dict_from_url(self._weights_url, progress=False) + dists_weights = list(torch.split(weights['alpha'], channels, dim=1)) + dists_weights.extend(torch.split(weights['beta'], channels, dim=1)) + + super().__init__("vgg16", layers=dists_layers, weights=dists_weights, + replace_pooling=True, reduction=reduction, mean=mean, std=std, + normalize_features=False, allow_layers_weights_mismatch=True) + + def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: + r""" + + Args: + x: An input tensor. Shape :math:`(N, C, H, W)`. + y: A target tensor. Shape :math:`(N, C, H, W)`. + + Returns: + Deep Image Structure and Texture Similarity loss, i.e. ``1-DISTS`` in range [0, 1]. + """ + _, _, H, W = x.shape + + if min(H, W) > 256: + x = torch.nn.functional.interpolate( + x, scale_factor=256 / min(H, W), recompute_scale_factor=False, mode='bilinear') + y = torch.nn.functional.interpolate( + y, scale_factor=256 / min(H, W), recompute_scale_factor=False, mode='bilinear') + + loss = super().forward(x, y) + return 1 - loss + + def compute_distance(self, x_features: torch.Tensor, y_features: torch.Tensor) -> List[torch.Tensor]: + r"""Compute structure similarity between feature maps + + Args: + x_features: Features of the input tensor. + y_features: Features of the target tensor. + + Returns: + Structural similarity distance between feature maps + """ + structure_distance, texture_distance = [], [] + # Small constant for numerical stability + EPS = 1e-6 + + for x, y in zip(x_features, y_features): + x_mean = x.mean([2, 3], keepdim=True) + y_mean = y.mean([2, 3], keepdim=True) + structure_distance.append(similarity_map(x_mean, y_mean, constant=EPS)) + + x_var = ((x - x_mean) ** 2).mean([2, 3], keepdim=True) + y_var = ((y - y_mean) ** 2).mean([2, 3], keepdim=True) + xy_cov = (x * y).mean([2, 3], keepdim=True) - x_mean * y_mean + texture_distance.append((2 * xy_cov + EPS) / (x_var + y_var + EPS)) + + return structure_distance + texture_distance + + def get_features(self, x: torch.Tensor) -> List[torch.Tensor]: + r""" + + Args: + x: Input tensor + + Returns: + List of features extracted from input tensor + """ + features = super().get_features(x) + + # Add input tensor as an additional feature + features.insert(0, x) + return features + + def replace_pooling(self, module: torch.nn.Module) -> torch.nn.Module: + r"""Turn All MaxPool layers into L2Pool + + Args: + module: Module to change MaxPool into L2Pool + + Returns: + Module with L2Pool instead of MaxPool + """ + module_output = module + if isinstance(module, torch.nn.MaxPool2d): + module_output = L2Pool2d(kernel_size=3, stride=2, padding=1) + + for name, child in module.named_children(): + module_output.add_module(name, self.replace_pooling(child)) + + return module_output diff --git a/pytorch_svgrender/libs/metric/piq/utils/__init__.py b/pytorch_svgrender/libs/metric/piq/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4ab6241444c024e5daa7b90190a45f481a66b69b --- /dev/null +++ b/pytorch_svgrender/libs/metric/piq/utils/__init__.py @@ -0,0 +1,7 @@ +from .common 
import _validate_input, _reduce, _parse_version + +__all__ = [ + "_validate_input", + "_reduce", + '_parse_version' +] diff --git a/pytorch_svgrender/libs/metric/piq/utils/common.py b/pytorch_svgrender/libs/metric/piq/utils/common.py new file mode 100644 index 0000000000000000000000000000000000000000..1ceb336a52669616ae5609941d90c916997a53eb --- /dev/null +++ b/pytorch_svgrender/libs/metric/piq/utils/common.py @@ -0,0 +1,158 @@ +import torch +import re +import warnings + +from typing import Tuple, List, Optional, Union, Dict, Any + +SEMVER_VERSION_PATTERN = re.compile( + r""" + ^ + (?P<major>0|[1-9]\d*) + \. + (?P<minor>0|[1-9]\d*) + \. + (?P<patch>0|[1-9]\d*) + (?:-(?P<prerelease> + (?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*) + (?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))* + ))? + (?:\+(?P<build> + [0-9a-zA-Z-]+ + (?:\.[0-9a-zA-Z-]+)* + ))? + $ + """, + re.VERBOSE, +) + + +PEP_440_VERSION_PATTERN = r""" + v? + (?: + (?:(?P<epoch>[0-9]+)!)? # epoch + (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment + (?P<pre> # pre-release + [-_\.]? + (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) + [-_\.]? + (?P<pre_n>[0-9]+)? + )? + (?P<post> # post release + (?:-(?P<post_n1>[0-9]+)) + | + (?: + [-_\.]? + (?P<post_l>post|rev|r) + [-_\.]? + (?P<post_n2>[0-9]+)? + ) + )? + (?P<dev> # dev release + [-_\.]? + (?P<dev_l>dev) + [-_\.]? + (?P<dev_n>[0-9]+)? + )? + ) + (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version +""" + + +def _validate_input( + tensors: List[torch.Tensor], + dim_range: Tuple[int, int] = (0, -1), + data_range: Tuple[float, float] = (0., -1.), + # size_dim_range: Tuple[float, float] = (0., -1.), + size_range: Optional[Tuple[int, int]] = None, +) -> None: + r"""Check that input(-s) satisfies the requirements + Args: + tensors: Tensors to check + dim_range: Allowed number of dimensions. (min, max) + data_range: Allowed range of values in tensors. (min, max) + size_range: Dimensions to include in size comparison. (start_dim, end_dim + 1) + """ + + if not __debug__: + return + + x = tensors[0] + + for t in tensors: + assert torch.is_tensor(t), f'Expected torch.Tensor, got {type(t)}' + assert t.device == x.device, f'Expected tensors to be on {x.device}, got {t.device}' + + if size_range is None: + assert t.size() == x.size(), f'Expected tensors with same size, got {t.size()} and {x.size()}' + else: + assert t.size()[size_range[0]: size_range[1]] == x.size()[size_range[0]: size_range[1]], \ + f'Expected tensors with same size at given dimensions, got {t.size()} and {x.size()}' + + if dim_range[0] == dim_range[1]: + assert t.dim() == dim_range[0], f'Expected number of dimensions to be {dim_range[0]}, got {t.dim()}' + elif dim_range[0] < dim_range[1]: + assert dim_range[0] <= t.dim() <= dim_range[1], \ + f'Expected number of dimensions to be between {dim_range[0]} and {dim_range[1]}, got {t.dim()}' + + if data_range[0] < data_range[1]: + assert data_range[0] <= t.min(), \ + f'Expected values to be greater or equal to {data_range[0]}, got {t.min()}' + assert t.max() <= data_range[1], \ + f'Expected values to be lower or equal to {data_range[1]}, got {t.max()}' + + +def _reduce(x: torch.Tensor, reduction: str = 'mean') -> torch.Tensor: + r"""Reduce input in batch dimension if needed. + + Args: + x: Tensor with shape (N, *). + reduction: Specifies the reduction type: + ``'none'`` | ``'mean'`` | ``'sum'``. 
Default: ``'mean'`` + """ + if reduction == 'none': + return x + elif reduction == 'mean': + return x.mean(dim=0) + elif reduction == 'sum': + return x.sum(dim=0) + else: + raise ValueError("Unknown reduction. Expected one of {'none', 'mean', 'sum'}") + + +def _parse_version(version: Union[str, bytes]) -> Tuple[int, ...]: + """ Parses valid Python versions according to Semver and PEP 440 specifications. + For more on Semver check: https://semver.org/ + For more on PEP 440 check: https://www.python.org/dev/peps/pep-0440/. + + Implementation is inspired by: + - https://github.com/python-semver + - https://github.com/pypa/packaging + + Args: + version: unparsed information about the library of interest. + + Returns: + parsed information about the library of interest. + """ + if isinstance(version, bytes): + version = version.decode("UTF-8") + elif not isinstance(version, str) and not isinstance(version, bytes): + raise TypeError(f"not expecting type {type(version)}") + + # Semver processing + match = SEMVER_VERSION_PATTERN.match(version) + if match: + matched_version_parts: Dict[str, Any] = match.groupdict() + release = tuple([int(matched_version_parts[k]) for k in ['major', 'minor', 'patch']]) + return release + + # PEP 440 processing + regex = re.compile(r"^\s*" + PEP_440_VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) + match = regex.search(version) + + if match is None: + warnings.warn(f"{version} is not a valid SemVer or PEP 440 string") + return tuple() + + release = tuple(int(i) for i in match.group("release").split(".")) + return release diff --git a/pytorch_svgrender/libs/metric/pytorch_fid/__init__.py b/pytorch_svgrender/libs/metric/pytorch_fid/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..782e20db5f2769a78f8783031cbcd327437ece9a --- /dev/null +++ b/pytorch_svgrender/libs/metric/pytorch_fid/__init__.py @@ -0,0 +1,54 @@ +__version__ = '0.3.0' + +import torch +from einops import rearrange, repeat + +from .inception import InceptionV3 +from .fid_score import calculate_frechet_distance + + +class PytorchFIDFactory(torch.nn.Module): + """ + + Args: + channels: + inception_block_idx: + + Examples: + >>> fid_factory = PytorchFIDFactory() + >>> fid_score = fid_factory.score(real_samples=data, fake_samples=all_images) + >>> print(fid_score) + """ + + def __init__(self, channels: int = 3, inception_block_idx: int = 2048): + super().__init__() + self.channels = channels + + # load models + assert inception_block_idx in InceptionV3.BLOCK_INDEX_BY_DIM + block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[inception_block_idx] + self.inception_v3 = InceptionV3([block_idx]) + + @torch.no_grad() + def calculate_activation_statistics(self, samples): + features = self.inception_v3(samples)[0] + features = rearrange(features, '... 1 1 -> ...') + + mu = torch.mean(features, dim=0).cpu() + sigma = torch.cov(features).cpu() + return mu, sigma + + def score(self, real_samples, fake_samples): + if self.channels == 1: + real_samples, fake_samples = map( + lambda t: repeat(t, 'b 1 ... 
-> b c ...', c=3), (real_samples, fake_samples) + ) + + min_batch = min(real_samples.shape[0], fake_samples.shape[0]) + real_samples, fake_samples = map(lambda t: t[:min_batch], (real_samples, fake_samples)) + + m1, s1 = self.calculate_activation_statistics(real_samples) + m2, s2 = self.calculate_activation_statistics(fake_samples) + + fid_value = calculate_frechet_distance(m1, s1, m2, s2) + return fid_value diff --git a/pytorch_svgrender/libs/metric/pytorch_fid/fid_score.py b/pytorch_svgrender/libs/metric/pytorch_fid/fid_score.py new file mode 100755 index 0000000000000000000000000000000000000000..117e0c77d25afded5e63429bb0a27a71967530f5 --- /dev/null +++ b/pytorch_svgrender/libs/metric/pytorch_fid/fid_score.py @@ -0,0 +1,322 @@ +"""Calculates the Frechet Inception Distance (FID) to evalulate GANs + +The FID metric calculates the distance between two distributions of images. +Typically, we have summary statistics (mean & covariance matrix) of one +of these distributions, while the 2nd distribution is given by a GAN. + +When run as a stand-alone program, it compares the distribution of +images that are stored as PNG/JPEG at a specified location with a +distribution given by summary statistics (in pickle format). + +The FID is calculated by assuming that X_1 and X_2 are the activations of +the pool_3 layer of the inception net for generated samples and real world +samples respectively. + +See --help to see further details. + +Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead +of Tensorflow + +Copyright 2018 Institute of Bioinformatics, JKU Linz + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" +import os +import pathlib +from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser + +import numpy as np +import torch +import torchvision.transforms as TF +from PIL import Image +from scipy import linalg +from torch.nn.functional import adaptive_avg_pool2d + +try: + from tqdm import tqdm +except ImportError: + # If tqdm is not available, provide a mock version of it + def tqdm(x): + return x + +from .inception import InceptionV3 + +parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) +parser.add_argument('--batch-size', type=int, default=50, + help='Batch size to use') +parser.add_argument('--num-workers', type=int, + help=('Number of processes to use for data loading. ' + 'Defaults to `min(8, num_cpus)`')) +parser.add_argument('--device', type=str, default=None, + help='Device to use. Like cuda, cuda:0 or cpu') +parser.add_argument('--dims', type=int, default=2048, + choices=list(InceptionV3.BLOCK_INDEX_BY_DIM), + help=('Dimensionality of Inception features to use. ' + 'By default, uses pool3 features')) +parser.add_argument('--save-stats', action='store_true', + help=('Generate an npz archive from a directory of samples. 
' + 'The first path is used as input and the second as output.')) +parser.add_argument('path', type=str, nargs=2, + help=('Paths to the generated images or ' + 'to .npz statistic files')) + +IMAGE_EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'pgm', 'png', 'ppm', + 'tif', 'tiff', 'webp'} + + +class ImagePathDataset(torch.utils.data.Dataset): + def __init__(self, files, transforms=None): + self.files = files + self.transforms = transforms + + def __len__(self): + return len(self.files) + + def __getitem__(self, i): + path = self.files[i] + img = Image.open(path).convert('RGB') + if self.transforms is not None: + img = self.transforms(img) + return img + + +def get_activations(files, model, batch_size=50, dims=2048, device='cpu', + num_workers=1): + """Calculates the activations of the pool_3 layer for all images. + + Params: + -- files : List of image files paths + -- model : Instance of inception model + -- batch_size : Batch size of images for the model to process at once. + Make sure that the number of samples is a multiple of + the batch size, otherwise some samples are ignored. This + behavior is retained to match the original FID score + implementation. + -- dims : Dimensionality of features returned by Inception + -- device : Device to run calculations + -- num_workers : Number of parallel dataloader workers + + Returns: + -- A numpy array of dimension (num images, dims) that contains the + activations of the given tensor when feeding inception with the + query tensor. + """ + model.eval() + + if batch_size > len(files): + print(('Warning: batch size is bigger than the data size. ' + 'Setting batch size to data size')) + batch_size = len(files) + + dataset = ImagePathDataset(files, transforms=TF.ToTensor()) + dataloader = torch.utils.data.DataLoader(dataset, + batch_size=batch_size, + shuffle=False, + drop_last=False, + num_workers=num_workers) + + pred_arr = np.empty((len(files), dims)) + + start_idx = 0 + + for batch in tqdm(dataloader): + batch = batch.to(device) + + with torch.no_grad(): + pred = model(batch)[0] + + # If model output is not scalar, apply global spatial average pooling. + # This happens if you choose a dimensionality not equal 2048. + if pred.size(2) != 1 or pred.size(3) != 1: + pred = adaptive_avg_pool2d(pred, output_size=(1, 1)) + + pred = pred.squeeze(3).squeeze(2).cpu().numpy() + + pred_arr[start_idx:start_idx + pred.shape[0]] = pred + + start_idx = start_idx + pred.shape[0] + + return pred_arr + + +def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): + """Numpy implementation of the Frechet Distance. + The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) + and X_2 ~ N(mu_2, C_2) is + d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). + + Stable version by Dougal J. Sutherland. + + Params: + -- mu1 : Numpy array containing the activations of a layer of the + inception net (like returned by the function 'get_predictions') + for generated samples. + -- mu2 : The sample mean over activations, precalculated on an + representative data set. + -- sigma1: The covariance matrix over activations for generated samples. + -- sigma2: The covariance matrix over activations, precalculated on an + representative data set. + + Returns: + -- : The Frechet Distance. 
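+               For identical inputs (mu1 == mu2, sigma1 == sigma2) the distance is
+               numerically zero. If the covariance product is nearly singular, a small
+               eps offset is added to the diagonals, and a ValueError is raised when the
+               matrix square root keeps a significant imaginary component.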
+ """ + + mu1 = np.atleast_1d(mu1) + mu2 = np.atleast_1d(mu2) + + sigma1 = np.atleast_2d(sigma1) + sigma2 = np.atleast_2d(sigma2) + + assert mu1.shape == mu2.shape, \ + 'Training and test mean vectors have different lengths' + assert sigma1.shape == sigma2.shape, \ + 'Training and test covariances have different dimensions' + + diff = mu1 - mu2 + + # Product might be almost singular + covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) + if not np.isfinite(covmean).all(): + msg = ('fid calculation produces singular product; ' + 'adding %s to diagonal of cov estimates') % eps + print(msg) + offset = np.eye(sigma1.shape[0]) * eps + covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) + + # Numerical error might give slight imaginary component + if np.iscomplexobj(covmean): + if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): + m = np.max(np.abs(covmean.imag)) + raise ValueError('Imaginary component {}'.format(m)) + covmean = covmean.real + + tr_covmean = np.trace(covmean) + + return (diff.dot(diff) + np.trace(sigma1) + + np.trace(sigma2) - 2 * tr_covmean) + + +def calculate_activation_statistics(files, model, batch_size=50, dims=2048, + device='cpu', num_workers=1): + """Calculation of the statistics used by the FID. + Params: + -- files : List of image files paths + -- model : Instance of inception model + -- batch_size : The images numpy array is split into batches with + batch size batch_size. A reasonable batch size + depends on the hardware. + -- dims : Dimensionality of features returned by Inception + -- device : Device to run calculations + -- num_workers : Number of parallel dataloader workers + + Returns: + -- mu : The mean over samples of the activations of the pool_3 layer of + the inception model. + -- sigma : The covariance matrix of the activations of the pool_3 layer of + the inception model. 
+ """ + act = get_activations(files, model, batch_size, dims, device, num_workers) + mu = np.mean(act, axis=0) + sigma = np.cov(act, rowvar=False) + return mu, sigma + + +def compute_statistics_of_path(path, model, batch_size, dims, device, + num_workers=1): + if path.endswith('.npz'): + with np.load(path) as f: + m, s = f['mu'][:], f['sigma'][:] + else: + path = pathlib.Path(path) + files = sorted([file for ext in IMAGE_EXTENSIONS + for file in path.glob('*.{}'.format(ext))]) + m, s = calculate_activation_statistics(files, model, batch_size, + dims, device, num_workers) + + return m, s + + +def calculate_fid_given_paths(paths, batch_size, device, dims, num_workers=1): + """Calculates the FID of two paths""" + for p in paths: + if not os.path.exists(p): + raise RuntimeError('Invalid path: %s' % p) + + block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] + + model = InceptionV3([block_idx]).to(device) + + m1, s1 = compute_statistics_of_path(paths[0], model, batch_size, + dims, device, num_workers) + m2, s2 = compute_statistics_of_path(paths[1], model, batch_size, + dims, device, num_workers) + fid_value = calculate_frechet_distance(m1, s1, m2, s2) + + return fid_value + + +def save_fid_stats(paths, batch_size, device, dims, num_workers=1): + """Calculates the FID of two paths""" + if not os.path.exists(paths[0]): + raise RuntimeError('Invalid path: %s' % paths[0]) + + if os.path.exists(paths[1]): + raise RuntimeError('Existing output file: %s' % paths[1]) + + block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] + + model = InceptionV3([block_idx]).to(device) + + print(f"Saving statistics for {paths[0]}") + + m1, s1 = compute_statistics_of_path(paths[0], model, batch_size, + dims, device, num_workers) + + np.savez_compressed(paths[1], mu=m1, sigma=s1) + + +def main(): + args = parser.parse_args() + + if args.device is None: + device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu') + else: + device = torch.device(args.device) + + if args.num_workers is None: + try: + num_cpus = len(os.sched_getaffinity(0)) + except AttributeError: + # os.sched_getaffinity is not available under Windows, use + # os.cpu_count instead (which may not return the *available* number + # of CPUs). 
+ num_cpus = os.cpu_count() + + num_workers = min(num_cpus, 8) if num_cpus is not None else 0 + else: + num_workers = args.num_workers + + if args.save_stats: + save_fid_stats(args.path, args.batch_size, device, args.dims, num_workers) + return + + fid_value = calculate_fid_given_paths(args.path, + args.batch_size, + device, + args.dims, + num_workers) + print('FID: ', fid_value) + + +if __name__ == '__main__': + main() diff --git a/pytorch_svgrender/libs/metric/pytorch_fid/inception.py b/pytorch_svgrender/libs/metric/pytorch_fid/inception.py new file mode 100644 index 0000000000000000000000000000000000000000..8898a20c0609f5bb31df3641e783ea95db45b95f --- /dev/null +++ b/pytorch_svgrender/libs/metric/pytorch_fid/inception.py @@ -0,0 +1,341 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision + +try: + from torchvision.models.utils import load_state_dict_from_url +except ImportError: + from torch.utils.model_zoo import load_url as load_state_dict_from_url + +# Inception weights ported to Pytorch from +# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz +FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' # noqa: E501 + + +class InceptionV3(nn.Module): + """Pretrained InceptionV3 network returning feature maps""" + + # Index of default block of inception to return, + # corresponds to output of final average pooling + DEFAULT_BLOCK_INDEX = 3 + + # Maps feature dimensionality to their output blocks indices + BLOCK_INDEX_BY_DIM = { + 64: 0, # First max pooling features + 192: 1, # Second max pooling featurs + 768: 2, # Pre-aux classifier features + 2048: 3 # Final average pooling features + } + + def __init__(self, + output_blocks=(DEFAULT_BLOCK_INDEX,), + resize_input=True, + normalize_input=True, + requires_grad=False, + use_fid_inception=True): + """Build pretrained InceptionV3 + + Parameters + ---------- + output_blocks : list of int + Indices of blocks to return features of. Possible values are: + - 0: corresponds to output of first max pooling + - 1: corresponds to output of second max pooling + - 2: corresponds to output which is fed to aux classifier + - 3: corresponds to output of final average pooling + resize_input : bool + If true, bilinearly resizes input to width and height 299 before + feeding input to model. As the network without fully connected + layers is fully convolutional, it should be able to handle inputs + of arbitrary size, so resizing might not be strictly needed + normalize_input : bool + If true, scales the input from range (0, 1) to the range the + pretrained Inception network expects, namely (-1, 1) + requires_grad : bool + If true, parameters of the model require gradients. Possibly useful + for finetuning the network + use_fid_inception : bool + If true, uses the pretrained Inception model used in Tensorflow's + FID implementation. If false, uses the pretrained Inception model + available in torchvision. The FID Inception model has different + weights and a slightly different structure from torchvision's + Inception model. If you want to compute FID scores, you are + strongly advised to set this parameter to true to get comparable + results. 
+ """ + super(InceptionV3, self).__init__() + + self.resize_input = resize_input + self.normalize_input = normalize_input + self.output_blocks = sorted(output_blocks) + self.last_needed_block = max(output_blocks) + + assert self.last_needed_block <= 3, \ + 'Last possible output block index is 3' + + self.blocks = nn.ModuleList() + + if use_fid_inception: + inception = fid_inception_v3() + else: + inception = _inception_v3(weights='DEFAULT') + + # Block 0: input to maxpool1 + block0 = [ + inception.Conv2d_1a_3x3, + inception.Conv2d_2a_3x3, + inception.Conv2d_2b_3x3, + nn.MaxPool2d(kernel_size=3, stride=2) + ] + self.blocks.append(nn.Sequential(*block0)) + + # Block 1: maxpool1 to maxpool2 + if self.last_needed_block >= 1: + block1 = [ + inception.Conv2d_3b_1x1, + inception.Conv2d_4a_3x3, + nn.MaxPool2d(kernel_size=3, stride=2) + ] + self.blocks.append(nn.Sequential(*block1)) + + # Block 2: maxpool2 to aux classifier + if self.last_needed_block >= 2: + block2 = [ + inception.Mixed_5b, + inception.Mixed_5c, + inception.Mixed_5d, + inception.Mixed_6a, + inception.Mixed_6b, + inception.Mixed_6c, + inception.Mixed_6d, + inception.Mixed_6e, + ] + self.blocks.append(nn.Sequential(*block2)) + + # Block 3: aux classifier to final avgpool + if self.last_needed_block >= 3: + block3 = [ + inception.Mixed_7a, + inception.Mixed_7b, + inception.Mixed_7c, + nn.AdaptiveAvgPool2d(output_size=(1, 1)) + ] + self.blocks.append(nn.Sequential(*block3)) + + for param in self.parameters(): + param.requires_grad = requires_grad + + def forward(self, inp): + """Get Inception feature maps + + Parameters + ---------- + inp : torch.autograd.Variable + Input tensor of shape Bx3xHxW. Values are expected to be in + range (0, 1) + + Returns + ------- + List of torch.autograd.Variable, corresponding to the selected output + block, sorted ascending by index + """ + outp = [] + x = inp + + if self.resize_input: + x = F.interpolate(x, + size=(299, 299), + mode='bilinear', + align_corners=False) + + if self.normalize_input: + x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1) + + for idx, block in enumerate(self.blocks): + x = block(x) + if idx in self.output_blocks: + outp.append(x) + + if idx == self.last_needed_block: + break + + return outp + + +def _inception_v3(*args, **kwargs): + """Wraps `torchvision.models.inception_v3`""" + try: + version = tuple(map(int, torchvision.__version__.split('.')[:2])) + except ValueError: + # Just a caution against weird version strings + version = (0,) + + # Skips default weight inititialization if supported by torchvision + # version. See https://github.com/mseitzer/pytorch-fid/issues/28. + if version >= (0, 6): + kwargs['init_weights'] = False + + # Backwards compatibility: `weights` argument was handled by `pretrained` + # argument prior to version 0.13. + if version < (0, 13) and 'weights' in kwargs: + if kwargs['weights'] == 'DEFAULT': + kwargs['pretrained'] = True + elif kwargs['weights'] is None: + kwargs['pretrained'] = False + else: + raise ValueError( + 'weights=={} not supported in torchvision {}'.format( + kwargs['weights'], torchvision.__version__ + ) + ) + del kwargs['weights'] + + return torchvision.models.inception_v3(*args, **kwargs) + + +def fid_inception_v3(): + """Build pretrained Inception model for FID computation + + The Inception model for FID computation uses a different set of weights + and has a slightly different structure than torchvision's Inception. 
+ + This method first constructs torchvision's Inception and then patches the + necessary parts that are different in the FID Inception model. + """ + inception = _inception_v3(num_classes=1008, + aux_logits=False, + weights=None) + inception.Mixed_5b = FIDInceptionA(192, pool_features=32) + inception.Mixed_5c = FIDInceptionA(256, pool_features=64) + inception.Mixed_5d = FIDInceptionA(288, pool_features=64) + inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128) + inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160) + inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160) + inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192) + inception.Mixed_7b = FIDInceptionE_1(1280) + inception.Mixed_7c = FIDInceptionE_2(2048) + + state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True) + inception.load_state_dict(state_dict) + return inception + + +class FIDInceptionA(torchvision.models.inception.InceptionA): + """InceptionA block patched for FID computation""" + def __init__(self, in_channels, pool_features): + super(FIDInceptionA, self).__init__(in_channels, pool_features) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + # Patch: Tensorflow's average pool does not use the padded zero's in + # its average calculation + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, + count_include_pad=False) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) + + +class FIDInceptionC(torchvision.models.inception.InceptionC): + """InceptionC block patched for FID computation""" + def __init__(self, in_channels, channels_7x7): + super(FIDInceptionC, self).__init__(in_channels, channels_7x7) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + # Patch: Tensorflow's average pool does not use the padded zero's in + # its average calculation + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, + count_include_pad=False) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return torch.cat(outputs, 1) + + +class FIDInceptionE_1(torchvision.models.inception.InceptionE): + """First InceptionE block patched for FID computation""" + def __init__(self, in_channels): + super(FIDInceptionE_1, self).__init__(in_channels) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + # Patch: Tensorflow's average pool does not use the padded zero's in + # its average calculation + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, 
padding=1, + count_include_pad=False) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) + + +class FIDInceptionE_2(torchvision.models.inception.InceptionE): + """Second InceptionE block patched for FID computation""" + def __init__(self, in_channels): + super(FIDInceptionE_2, self).__init__(in_channels) + + def forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + # Patch: The FID Inception model uses max pooling instead of average + # pooling. This is likely an error in this specific Inception + # implementation, as other Inception models use average pooling here + # (which matches the description in the paper). + branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) diff --git a/pytorch_svgrender/libs/modules/__init__.py b/pytorch_svgrender/libs/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad761f2f5443eb41b15afc4116a66ecdfa9d918 --- /dev/null +++ b/pytorch_svgrender/libs/modules/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: diff --git a/pytorch_svgrender/libs/modules/edge_map/DoG/XDoG.py b/pytorch_svgrender/libs/modules/edge_map/DoG/XDoG.py new file mode 100644 index 0000000000000000000000000000000000000000..4553df9deec12af5b88cee4701300a95ab17ebdc --- /dev/null +++ b/pytorch_svgrender/libs/modules/edge_map/DoG/XDoG.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import numpy as np +import cv2 +from scipy import ndimage as ndi +from skimage import filters + + +class XDoG: + + def __init__(self, + gamma=0.98, + phi=200, + eps=-0.1, + sigma=0.8, + k=10, + binarize: bool = True): + """ + XDoG algorithm. 
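+
+        A minimal usage sketch (assumes an image loaded with OpenCV and scaled
+        to [0, 1]; the file name is illustrative)::
+
+            img = cv2.imread('sketch.png').astype(np.float32) / 255.0
+            edges = XDoG(binarize=True)(img)  # float32 edge map in {0, 1}
+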
+ + Args: + gamma: Control the size of the Gaussian filter + phi: Control changes in edge strength + eps: Threshold for controlling edge strength + sigma: The standard deviation of the Gaussian filter controls the degree of smoothness + k: Control the size ratio of Gaussian filter, (k=10 or k=1.6) + binarize(bool): Whether to binarize the output + """ + + super(XDoG, self).__init__() + + self.gamma = gamma + assert 0 <= self.gamma <= 1 + + self.phi = phi + assert 0 <= self.phi <= 1500 + + self.eps = eps + assert -1 <= self.eps <= 1 + + self.sigma = sigma + assert 0.1 <= self.sigma <= 10 + + self.k = k + assert 1 <= self.k <= 100 + + self.binarize = binarize + + def __call__(self, img): + # to gray if image is not already grayscale + if len(img.shape) == 3 and img.shape[2] == 3: + img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + elif len(img.shape) == 3 and img.shape[2] == 4: + img = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY) + + if np.isnan(img).any(): + img[np.isnan(img)] = np.mean(img[~np.isnan(img)]) + + # gaussian filter + imf1 = ndi.gaussian_filter(img, self.sigma) + imf2 = ndi.gaussian_filter(img, self.sigma * self.k) + imdiff = imf1 - self.gamma * imf2 + + # XDoG + imdiff = (imdiff < self.eps) * 1.0 + (imdiff >= self.eps) * (1.0 + np.tanh(self.phi * imdiff)) + + # normalize + imdiff -= imdiff.min() + imdiff /= imdiff.max() + + if self.binarize: + th = filters.threshold_otsu(imdiff) + imdiff = (imdiff >= th).astype('float32') + + return imdiff diff --git a/pytorch_svgrender/libs/modules/edge_map/DoG/__init__.py b/pytorch_svgrender/libs/modules/edge_map/DoG/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5bea1d52fb83eb6335ce73cf4a1e4c7fb28fa671 --- /dev/null +++ b/pytorch_svgrender/libs/modules/edge_map/DoG/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .XDoG import XDoG + +__all__ = ['XDoG'] diff --git a/pytorch_svgrender/libs/modules/edge_map/__init__.py b/pytorch_svgrender/libs/modules/edge_map/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad761f2f5443eb41b15afc4116a66ecdfa9d918 --- /dev/null +++ b/pytorch_svgrender/libs/modules/edge_map/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: diff --git a/pytorch_svgrender/libs/modules/edge_map/canny/__init__.py b/pytorch_svgrender/libs/modules/edge_map/canny/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dbed53c90ffbadec00cbaa41552c71f2818dc9b6 --- /dev/null +++ b/pytorch_svgrender/libs/modules/edge_map/canny/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import cv2 + + +class CannyDetector: + + def __call__(self, img, low_threshold, high_threshold, L2gradient=False): + return cv2.Canny(img, low_threshold, high_threshold, L2gradient) + + +__all__ = ['CannyDetector'] diff --git a/pytorch_svgrender/libs/modules/edge_map/image_grads/__init__.py b/pytorch_svgrender/libs/modules/edge_map/image_grads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..63070b57210f6d66256a299a3ea4538d7350127f --- /dev/null +++ b/pytorch_svgrender/libs/modules/edge_map/image_grads/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: + +from .laplacian import LaplacianDetector + +__all__ = ['LaplacianDetector'] diff --git a/pytorch_svgrender/libs/modules/edge_map/image_grads/laplacian.py b/pytorch_svgrender/libs/modules/edge_map/image_grads/laplacian.py new file mode 100644 index 0000000000000000000000000000000000000000..ee1aa25286b5d46bd4578c8240ea473ca391c6e2 --- /dev/null +++ b/pytorch_svgrender/libs/modules/edge_map/image_grads/laplacian.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + + +import cv2 + + +class LaplacianDetector: + + def __call__(self, img): + return cv2.Laplacian(img, cv2.CV_64F) diff --git a/pytorch_svgrender/libs/modules/resizer/__init__.py b/pytorch_svgrender/libs/modules/resizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ea637d3d29485895facb540dffbd18fd22d025b --- /dev/null +++ b/pytorch_svgrender/libs/modules/resizer/__init__.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .resizer import resize +from . import interp_methods + +__all__ = ['resize', 'interp_methods'] diff --git a/pytorch_svgrender/libs/modules/resizer/interp_methods.py b/pytorch_svgrender/libs/modules/resizer/interp_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..27c26829ffc8dce48ad42bd46c6f6b77d0913b15 --- /dev/null +++ b/pytorch_svgrender/libs/modules/resizer/interp_methods.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Description: + +from math import pi + +try: + import torch +except ImportError: + torch = None + +try: + import numpy +except ImportError: + numpy = None + +if numpy is None and torch is None: + raise ImportError("Must have either Numpy or PyTorch but both not found") + + +def set_framework_dependencies(x): + if type(x) is numpy.ndarray: + to_dtype = lambda a: a + fw = numpy + else: + to_dtype = lambda a: a.to(x.dtype) + fw = torch + eps = fw.finfo(fw.float32).eps + return fw, to_dtype, eps + + +def support_sz(sz): + def wrapper(f): + f.support_sz = sz + return f + + return wrapper + + +@support_sz(4) +def cubic(x): + fw, to_dtype, eps = set_framework_dependencies(x) + absx = fw.abs(x) + absx2 = absx ** 2 + absx3 = absx ** 3 + return ((1.5 * absx3 - 2.5 * absx2 + 1.) * to_dtype(absx <= 1.) + + (-0.5 * absx3 + 2.5 * absx2 - 4. * absx + 2.) * + to_dtype((1. 
< absx) & (absx <= 2.))) + + +@support_sz(4) +def lanczos2(x): + fw, to_dtype, eps = set_framework_dependencies(x) + return (((fw.sin(pi * x) * fw.sin(pi * x / 2) + eps) / + ((pi ** 2 * x ** 2 / 2) + eps)) * to_dtype(abs(x) < 2)) + + +@support_sz(6) +def lanczos3(x): + fw, to_dtype, eps = set_framework_dependencies(x) + return (((fw.sin(pi * x) * fw.sin(pi * x / 3) + eps) / + ((pi ** 2 * x ** 2 / 3) + eps)) * to_dtype(abs(x) < 3)) + + +@support_sz(2) +def linear(x): + fw, to_dtype, eps = set_framework_dependencies(x) + return ((x + 1) * to_dtype((-1 <= x) & (x < 0)) + (1 - x) * + to_dtype((0 <= x) & (x <= 1))) + + +@support_sz(1) +def box(x): + fw, to_dtype, eps = set_framework_dependencies(x) + return to_dtype((-1 <= x) & (x < 0)) + to_dtype((0 <= x) & (x <= 1)) diff --git a/pytorch_svgrender/libs/modules/resizer/resizer.py b/pytorch_svgrender/libs/modules/resizer/resizer.py new file mode 100644 index 0000000000000000000000000000000000000000..de46777d4b726ea5a4f601397669b4d285bc801e --- /dev/null +++ b/pytorch_svgrender/libs/modules/resizer/resizer.py @@ -0,0 +1,431 @@ +# -*- coding: utf-8 -*- +# Description: + +import warnings +from math import ceil +from fractions import Fraction + +from . import interp_methods + + +class NoneClass: + pass + + +try: + import torch + from torch import nn + + nnModuleWrapped = nn.Module +except ImportError: + warnings.warn('No PyTorch found, will work only with Numpy') + torch = None + nnModuleWrapped = NoneClass + +try: + import numpy +except ImportError: + warnings.warn('No Numpy found, will work only with PyTorch') + numpy = None + +if numpy is None and torch is None: + raise ImportError("Must have either Numpy or PyTorch but both not found") + + +def resize(input, scale_factors=None, out_shape=None, + interp_method=interp_methods.cubic, support_sz=None, + antialiasing=True, by_convs=False, scale_tolerance=None, + max_numerator=10, pad_mode='replicate'): + # get properties of the input tensor + in_shape, n_dims = input.shape, input.ndim + + # fw stands for framework that can be either numpy or torch, + # determined by the input type + fw = numpy if type(input) is numpy.ndarray else torch + eps = fw.finfo(fw.float32).eps + device = input.device if fw is torch else None + + # set missing scale factors or output shapem one according to another, + # scream if both missing. this is also where all the defults policies + # take place. also handling the by_convs attribute carefully. + scale_factors, out_shape, by_convs = set_scale_and_out_sz(in_shape, + out_shape, + scale_factors, + by_convs, + scale_tolerance, + max_numerator, + eps, fw) + + # sort indices of dimensions according to scale of each dimension. + # since we are going dim by dim this is efficient + sorted_filtered_dims_and_scales = [(dim, scale_factors[dim], by_convs[dim], + in_shape[dim], out_shape[dim]) + for dim in sorted(range(n_dims), + key=lambda ind: scale_factors[ind]) + if scale_factors[dim] != 1.] 
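+    # e.g. a (3, 256, 256) torch tensor resized with scale_factors=0.5 yields
+    # [(1, 0.5, False, 256, 128), (2, 0.5, False, 256, 128)]: only the two
+    # spatial dims are processed and the channel dim (scale 1.0) is skipped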
+
+    # unless support size is specified by the user, it is an attribute
+    # of the interpolation method
+    if support_sz is None:
+        support_sz = interp_method.support_sz
+
+    # output begins identical to input and changes with each iteration
+    output = input
+
+    # iterate over dims
+    for (dim, scale_factor, dim_by_convs, in_sz, out_sz
+         ) in sorted_filtered_dims_and_scales:
+        # STEP 1- PROJECTED GRID: The non-integer locations of the projection
+        # of output pixel locations to the input tensor
+        projected_grid = get_projected_grid(in_sz, out_sz,
+                                            scale_factor, fw, dim_by_convs,
+                                            device)
+
+        # STEP 1.5: ANTIALIASING- If antialiasing is taking place, we modify
+        # the window size and the interpolation method (see inside function)
+        cur_interp_method, cur_support_sz = apply_antialiasing_if_needed(
+            interp_method,
+            support_sz,
+            scale_factor,
+            antialiasing)
+
+        # STEP 2- FIELDS OF VIEW: for each output pixel, map the input pixels
+        # that influence it. Also calculate needed padding and update grid
+        # accordingly
+        field_of_view = get_field_of_view(projected_grid, cur_support_sz, fw,
+                                          eps, device)
+
+        # STEP 2.5- CALCULATE PAD AND UPDATE: according to the field of view,
+        # the input should be padded to handle the boundaries, coordinates
+        # should be updated. actual padding only occurs when weights are
+        # applied (step 4). if using by_convs for this dim, then we need to
+        # calc right and left boundaries for each filter instead.
+        pad_sz, projected_grid, field_of_view = calc_pad_sz(in_sz, out_sz,
+                                                            field_of_view,
+                                                            projected_grid,
+                                                            scale_factor,
+                                                            dim_by_convs, fw,
+                                                            device)
+
+        # STEP 3- CALCULATE WEIGHTS: Match a set of weights to the pixels in
+        # the field of view for each output pixel
+        weights = get_weights(cur_interp_method, projected_grid, field_of_view)
+
+        # STEP 4- APPLY WEIGHTS: Each output pixel is calculated by multiplying
+        # its set of weights with the pixel values in its field of view.
+        # We now multiply the fields of view with their matching weights.
+        # We do this by tensor multiplication and broadcasting.
+        # if by_convs is true for this dim, then we do this action by
+        # convolutions. this is equivalent but faster.
+        if not dim_by_convs:
+            output = apply_weights(output, field_of_view, weights, dim, n_dims,
+                                   pad_sz, pad_mode, fw)
+        else:
+            output = apply_convs(output, scale_factor, in_sz, out_sz, weights,
+                                 dim, pad_sz, pad_mode, fw)
+    return output
+
+
+def get_projected_grid(in_sz, out_sz, scale_factor, fw, by_convs, device=None):
+    # we start by having the output coordinates, which are just integer
+    # locations. in the special case when using by_convs, we only need two
+    # cycles of grid points: the first and last.
+    grid_sz = out_sz if not by_convs else scale_factor.numerator
+    out_coordinates = fw_arange(grid_sz, fw, device)
+
+    # This is projecting the output pixel locations in 1d to the input tensor,
+    # as non-integer locations.
+    # the following formula is derived in the paper
+    # "From Discrete to Continuous Convolutions" by Shocher et al.
+    return (out_coordinates / float(scale_factor) +
+            (in_sz - 1) / 2 - (out_sz - 1) / (2 * float(scale_factor)))
+
+
+def get_field_of_view(projected_grid, cur_support_sz, fw, eps, device):
+    # for each output pixel, map which input pixels influence it, in 1d.
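+    # (e.g. cubic interpolation has support 4, so without antialiasing each
+    # output pixel gathers a window of 4 consecutive input indices)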
+ # we start by calculating the leftmost neighbor, using half of the window + # size (eps is for when boundary is exact int) + left_boundaries = fw_ceil(projected_grid - cur_support_sz / 2 - eps, fw) + + # then we simply take all the pixel centers in the field by counting + # window size pixels from the left boundary + ordinal_numbers = fw_arange(ceil(cur_support_sz - eps), fw, device) + return left_boundaries[:, None] + ordinal_numbers + + +def calc_pad_sz(in_sz, out_sz, field_of_view, projected_grid, scale_factor, + dim_by_convs, fw, device): + if not dim_by_convs: + # determine padding according to neighbor coords out of bound. + # this is a generalized notion of padding, when pad<0 it means crop + pad_sz = [-field_of_view[0, 0].item(), + field_of_view[-1, -1].item() - in_sz + 1] + + # since input image will be changed by padding, coordinates of both + # field_of_view and projected_grid need to be updated + field_of_view += pad_sz[0] + projected_grid += pad_sz[0] + + else: + # only used for by_convs, to calc the boundaries of each filter the + # number of distinct convolutions is the numerator of the scale factor + num_convs, stride = scale_factor.numerator, scale_factor.denominator + + # calculate left and right boundaries for each conv. left can also be + # negative right can be bigger than in_sz. such cases imply padding if + # needed. however if# both are in-bounds, it means we need to crop, + # practically apply the conv only on part of the image. + left_pads = -field_of_view[:, 0] + + # next calc is tricky, explanation by rows: + # 1) counting output pixels between the first position of each filter + # to the right boundary of the input + # 2) dividing it by number of filters to count how many 'jumps' + # each filter does + # 3) multiplying by the stride gives us the distance over the input + # coords done by all these jumps for each filter + # 4) to this distance we add the right boundary of the filter when + # placed in its leftmost position. so now we get the right boundary + # of that filter in input coord. + # 5) the padding size needed is obtained by subtracting the rightmost + # input coordinate. if the result is positive padding is needed. if + # negative then negative padding means shaving off pixel columns. + right_pads = (((out_sz - fw_arange(num_convs, fw, device) - 1) # (1) + // num_convs) # (2) + * stride # (3) + + field_of_view[:, -1] # (4) + - in_sz + 1) # (5) + + # in the by_convs case pad_sz is a list of left-right pairs. one per + # each filter + + pad_sz = list(zip(left_pads, right_pads)) + + return pad_sz, projected_grid, field_of_view + + +def get_weights(interp_method, projected_grid, field_of_view): + # the set of weights per each output pixels is the result of the chosen + # interpolation method applied to the distances between projected grid + # locations and the pixel-centers in the field of view (distances are + # directed, can be positive or negative) + weights = interp_method(projected_grid[:, None] - field_of_view) + + # we now carefully normalize the weights to sum to 1 per each output pixel + sum_weights = weights.sum(1, keepdims=True) + sum_weights[sum_weights == 0] = 1 + return weights / sum_weights + + +def apply_weights(input, field_of_view, weights, dim, n_dims, pad_sz, pad_mode, + fw): + # for this operation we assume the resized dim is the first one. 
+ # so we transpose and will transpose back after multiplying + tmp_input = fw_swapaxes(input, dim, 0, fw) + + # apply padding + tmp_input = fw_pad(tmp_input, fw, pad_sz, pad_mode) + + # field_of_view is a tensor of order 2: for each output (1d location + # along cur dim)- a list of 1d neighbors locations. + # note that this whole operations is applied to each dim separately, + # this is why it is all in 1d. + # neighbors = tmp_input[field_of_view] is a tensor of order image_dims+1: + # for each output pixel (this time indicated in all dims), these are the + # values of the neighbors in the 1d field of view. note that we only + # consider neighbors along the current dim, but such set exists for every + # multi-dim location, hence the final tensor order is image_dims+1. + neighbors = tmp_input[field_of_view] + + # weights is an order 2 tensor: for each output location along 1d- a list + # of weights matching the field of view. we augment it with ones, for + # broadcasting, so that when multiplies some tensor the weights affect + # only its first dim. + tmp_weights = fw.reshape(weights, (*weights.shape, *[1] * (n_dims - 1))) + + # now we simply multiply the weights with the neighbors, and then sum + # along the field of view, to get a single value per out pixel + tmp_output = (neighbors * tmp_weights).sum(1) + + # we transpose back the resized dim to its original position + return fw_swapaxes(tmp_output, 0, dim, fw) + + +def apply_convs(input, scale_factor, in_sz, out_sz, weights, dim, pad_sz, + pad_mode, fw): + # for this operations we assume the resized dim is the last one. + # so we transpose and will transpose back after multiplying + input = fw_swapaxes(input, dim, -1, fw) + + # the stride for all convs is the denominator of the scale factor + stride, num_convs = scale_factor.denominator, scale_factor.numerator + + # prepare an empty tensor for the output + tmp_out_shape = list(input.shape) + tmp_out_shape[-1] = out_sz + tmp_output = fw_empty(tuple(tmp_out_shape), fw, input.device) + + # iterate over the conv operations. we have as many as the numerator + # of the scale-factor. for each we need boundaries and a filter. + for conv_ind, (pad_sz, filt) in enumerate(zip(pad_sz, weights)): + # apply padding (we pad last dim, padding can be negative) + pad_dim = input.ndim - 1 + tmp_input = fw_pad(input, fw, pad_sz, pad_mode, dim=pad_dim) + + # apply convolution over last dim. store in the output tensor with + # positional strides so that when the loop is comlete conv results are + # interwind + tmp_output[..., conv_ind::num_convs] = fw_conv(tmp_input, filt, stride) + + return fw_swapaxes(tmp_output, -1, dim, fw) + + +def set_scale_and_out_sz(in_shape, out_shape, scale_factors, by_convs, + scale_tolerance, max_numerator, eps, fw): + # eventually we must have both scale-factors and out-sizes for all in/out + # dims. 
however, we support many possible partial arguments
+    if scale_factors is None and out_shape is None:
+        raise ValueError("either scale_factors or out_shape should be "
+                         "provided")
+    if out_shape is not None:
+        # if out_shape has fewer dims than in_shape, by default we resize the
+        # first dims for numpy and the last dims for torch
+        out_shape = (list(out_shape) + list(in_shape[len(out_shape):])
+                     if fw is numpy
+                     else list(in_shape[:-len(out_shape)]) + list(out_shape))
+        if scale_factors is None:
+            # if no scale given, we calculate it as the out to in ratio
+            # (not recommended)
+            scale_factors = [out_sz / in_sz for out_sz, in_sz
+                             in zip(out_shape, in_shape)]
+    if scale_factors is not None:
+        # by default, if a single number is given as scale, we assume resizing
+        # two dims (most common are images with 2 spatial dims)
+        scale_factors = (scale_factors
+                         if isinstance(scale_factors, (list, tuple))
+                         else [scale_factors, scale_factors])
+        # if fewer scale_factors than in_shape dims, by default we resize the
+        # first dims for numpy and the last dims for torch
+        scale_factors = (list(scale_factors) + [1] *
+                         (len(in_shape) - len(scale_factors)) if fw is numpy
+                         else [1] * (len(in_shape) - len(scale_factors)) +
+                         list(scale_factors))
+        if out_shape is None:
+            # when no out_shape given, it is calculated by multiplying the
+            # scale by the in_shape (not recommended)
+            out_shape = [ceil(scale_factor * in_sz)
+                         for scale_factor, in_sz in
+                         zip(scale_factors, in_shape)]
+    # next part intentionally after out_shape determined for stability
+    # we fix by_convs to be a list of truth values in case it is not
+    if not isinstance(by_convs, (list, tuple)):
+        by_convs = [by_convs] * len(out_shape)
+
+    # next loop fixes the scale for each dim to be either frac or float.
+    # this is determined by by_convs and by tolerance for scale accuracy.
+    for ind, (sf, dim_by_convs) in enumerate(zip(scale_factors, by_convs)):
+        # first we convert the scale to a fraction
+        if dim_by_convs:
+            frac = Fraction(1 / sf).limit_denominator(max_numerator)
+            frac = Fraction(numerator=frac.denominator, denominator=frac.numerator)
+
+        # if accuracy is within tolerance the scale will be frac. if not, then
+        # it will be float and the by_convs attr will be set false for
+        # this dim
+        if scale_tolerance is None:
+            scale_tolerance = eps
+        if dim_by_convs and abs(frac - sf) < scale_tolerance:
+            scale_factors[ind] = frac
+        else:
+            scale_factors[ind] = float(sf)
+            by_convs[ind] = False
+
+    return scale_factors, out_shape, by_convs
+
+
+def apply_antialiasing_if_needed(interp_method, support_sz, scale_factor,
+                                 antialiasing):
+    # antialiasing is "stretching" the field of view according to the scale
+    # factor (only for downscaling). this is low-pass filtering. this
+    # requires modifying both the interpolation (stretching the 1d
+    # function and multiplying by the scale-factor) and the window size.
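+    # concretely, for a downscale factor s < 1 the kernel k(x) becomes
+    # s * k(s * x) and its support grows from support_sz to support_sz / s,
+    # which is what the two assignments below implement.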
+ scale_factor = float(scale_factor) + if scale_factor >= 1.0 or not antialiasing: + return interp_method, support_sz + cur_interp_method = (lambda arg: scale_factor * + interp_method(scale_factor * arg)) + cur_support_sz = support_sz / scale_factor + return cur_interp_method, cur_support_sz + + +def fw_ceil(x, fw): + if fw is numpy: + return fw.int_(fw.ceil(x)) + else: + return x.ceil().long() + + +def fw_floor(x, fw): + if fw is numpy: + return fw.int_(fw.floor(x)) + else: + return x.floor().long() + + +def fw_cat(x, fw): + if fw is numpy: + return fw.concatenate(x) + else: + return fw.cat(x) + + +def fw_swapaxes(x, ax_1, ax_2, fw): + if fw is numpy: + return fw.swapaxes(x, ax_1, ax_2) + else: + return x.transpose(ax_1, ax_2) + + +def fw_pad(x, fw, pad_sz, pad_mode, dim=0): + if pad_sz == (0, 0): + return x + if fw is numpy: + pad_vec = [(0, 0)] * x.ndim + pad_vec[dim] = pad_sz + return fw.pad(x, pad_width=pad_vec, mode=pad_mode) + else: + if x.ndim < 3: + x = x[None, None, ...] + + pad_vec = [0] * ((x.ndim - 2) * 2) + pad_vec[0:2] = pad_sz + return fw.nn.functional.pad(x.transpose(dim, -1), pad=pad_vec, + mode=pad_mode).transpose(dim, -1) + + +def fw_conv(input, filter, stride): + # we want to apply 1d conv to any nd array. the way to do it is to reshape + # the input to a 4D tensor. first two dims are singeletons, 3rd dim stores + # all the spatial dims that we are not convolving along now. then we can + # apply conv2d with a 1xK filter. This convolves the same way all the other + # dims stored in the 3d dim. like depthwise conv over these. + # TODO: numpy support + reshaped_input = input.reshape(1, 1, -1, input.shape[-1]) + reshaped_output = torch.nn.functional.conv2d(reshaped_input, + filter.view(1, 1, 1, -1), + stride=(1, stride)) + return reshaped_output.reshape(*input.shape[:-1], -1) + + +def fw_arange(upper_bound, fw, device): + if fw is numpy: + return fw.arange(upper_bound) + else: + return fw.arange(upper_bound, device=device) + + +def fw_empty(shape, fw, device): + if fw is numpy: + return fw.empty(shape) + else: + return fw.empty(size=(*shape,), device=device) diff --git a/pytorch_svgrender/libs/modules/vision/__init__.py b/pytorch_svgrender/libs/modules/vision/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0804535662dfb998c093d164f45950c7bffefca8 --- /dev/null +++ b/pytorch_svgrender/libs/modules/vision/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .inception import inception_v3 +from .vgg import VGG + +__all__ = [ + 'inception_v3', + 'VGG' +] diff --git a/pytorch_svgrender/libs/modules/vision/inception.py b/pytorch_svgrender/libs/modules/vision/inception.py new file mode 100644 index 0000000000000000000000000000000000000000..45f20be02e0863329d996a77df68bedaf56deafc --- /dev/null +++ b/pytorch_svgrender/libs/modules/vision/inception.py @@ -0,0 +1,482 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: + +from collections import namedtuple +import warnings +from typing import Callable, Any, Optional, Tuple, List + +import torch +from torch import nn, Tensor +import torch.nn.functional as F +from torch.utils.model_zoo import load_url as load_state_dict_from_url + +__all__ = ['Inception3', 'inception_v3', 'InceptionOutputs', '_InceptionOutputs'] + +model_urls = { + # Inception v3 ported from TensorFlow + 'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth', +} + +InceptionOutputs = namedtuple('InceptionOutputs', ['logits', 'aux_logits']) +InceptionOutputs.__annotations__ = {'logits': Tensor, 'aux_logits': Optional[Tensor]} + +# Script annotations failed with _GoogleNetOutputs = namedtuple ... +# _InceptionOutputs set here for backwards compat +_InceptionOutputs = InceptionOutputs + + +def inception_v3(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "Inception3": + r"""Inception v3 model architecture from + `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_. + + .. note:: + **Important**: In contrast to the other models the inception_v3 expects tensors with a size of + N x 3 x 299 x 299, so ensure your images are sized accordingly. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + aux_logits (bool): If True, add an auxiliary branch that can improve training. + Default: *True* + transform_input (bool): If True, preprocesses the input according to the method with which it + was trained on ImageNet. Default: *False* + """ + if pretrained: + if 'transform_input' not in kwargs: + kwargs['transform_input'] = True + if 'aux_logits' in kwargs: + original_aux_logits = kwargs['aux_logits'] + kwargs['aux_logits'] = True + else: + original_aux_logits = True + kwargs['init_weights'] = False # we are loading weights from a pretrained model + model = Inception3(**kwargs) + state_dict = load_state_dict_from_url(model_urls['inception_v3_google'], + progress=progress) + model.load_state_dict(state_dict) + if not original_aux_logits: + model.aux_logits = False + model.AuxLogits = None + return model + + return Inception3(**kwargs) + + +class Inception3(nn.Module): + + def __init__( + self, + num_classes: int = 1000, + aux_logits: bool = True, + transform_input: bool = False, + inception_blocks: Optional[List[Callable[..., nn.Module]]] = None, + init_weights: Optional[bool] = None + ) -> None: + super(Inception3, self).__init__() + if inception_blocks is None: + inception_blocks = [ + BasicConv2d, InceptionA, InceptionB, InceptionC, + InceptionD, InceptionE, InceptionAux + ] + if init_weights is None: + warnings.warn('The default weight initialization of inception_v3 will be changed in future releases of ' + 'torchvision. 
If you wish to keep the old behavior (which leads to long initialization times' + ' due to scipy/scipy#11299), please set init_weights=True.', FutureWarning) + init_weights = True + assert len(inception_blocks) == 7 + conv_block = inception_blocks[0] + inception_a = inception_blocks[1] + inception_b = inception_blocks[2] + inception_c = inception_blocks[3] + inception_d = inception_blocks[4] + inception_e = inception_blocks[5] + inception_aux = inception_blocks[6] + + self.aux_logits = aux_logits + self.transform_input = transform_input + self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1) + self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3) + self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Mixed_5b = inception_a(192, pool_features=32) + self.Mixed_5c = inception_a(256, pool_features=64) + self.Mixed_5d = inception_a(288, pool_features=64) + self.Mixed_6a = inception_b(288) + self.Mixed_6b = inception_c(768, channels_7x7=128) + self.Mixed_6c = inception_c(768, channels_7x7=160) + self.Mixed_6d = inception_c(768, channels_7x7=160) + self.Mixed_6e = inception_c(768, channels_7x7=192) + self.AuxLogits: Optional[nn.Module] = None + if aux_logits: + self.AuxLogits = inception_aux(768, num_classes) + self.Mixed_7a = inception_d(768) + self.Mixed_7b = inception_e(1280) + self.Mixed_7c = inception_e(2048) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.dropout = nn.Dropout() + self.fc = nn.Linear(2048, num_classes) + if init_weights: + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + import scipy.stats as stats + stddev = m.stddev if hasattr(m, 'stddev') else 0.1 + X = stats.truncnorm(-2, 2, scale=stddev) + values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype) + values = values.view(m.weight.size()) + with torch.no_grad(): + m.weight.copy_(values) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _transform_input(self, x: Tensor) -> Tensor: + if self.transform_input: + x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 + x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 + x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 + x = torch.cat((x_ch0, x_ch1, x_ch2), 1) + return x + + def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor]]: + # N x 3 x 299 x 299 + x = self.Conv2d_1a_3x3(x) + # N x 32 x 149 x 149 + x = self.Conv2d_2a_3x3(x) + # N x 32 x 147 x 147 + x = self.Conv2d_2b_3x3(x) + # N x 64 x 147 x 147 + feat = self.maxpool1(x) + # N x 64 x 73 x 73 + x = self.Conv2d_3b_1x1(feat) + # N x 80 x 73 x 73 + x = self.Conv2d_4a_3x3(x) + # N x 192 x 71 x 71 + x = self.maxpool2(x) + # N x 192 x 35 x 35 + x = self.Mixed_5b(x) + # N x 256 x 35 x 35 + x = self.Mixed_5c(x) + # N x 288 x 35 x 35 + x = self.Mixed_5d(x) + # N x 288 x 35 x 35 + x = self.Mixed_6a(x) + # N x 768 x 17 x 17 + x = self.Mixed_6b(x) + # N x 768 x 17 x 17 + x = self.Mixed_6c(x) + # N x 768 x 17 x 17 + x = self.Mixed_6d(x) + # N x 768 x 17 x 17 + x = self.Mixed_6e(x) + # N x 768 x 17 x 17 + aux: Optional[Tensor] = None + if self.AuxLogits is not None: + if self.training: + aux = self.AuxLogits(x) + # N x 768 x 17 x 17 + x = self.Mixed_7a(x) + # N x 1280 x 8 x 8 + x = 
self.Mixed_7b(x) + # N x 2048 x 8 x 8 + x = self.Mixed_7c(x) + # N x 2048 x 8 x 8 + # Adaptive average pooling + x = self.avgpool(x) + # N x 2048 x 1 x 1 + x = self.dropout(x) + # N x 2048 x 1 x 1 + x = torch.flatten(x, 1) + # N x 2048 + x = self.fc(x) + # N x 1000 (num_classes) + return feat, x, aux + + @torch.jit.unused + def eager_outputs(self, x: Tensor, aux: Optional[Tensor]) -> InceptionOutputs: + if self.training and self.aux_logits: + return InceptionOutputs(x, aux) + else: + return x # type: ignore[return-value] + + def forward(self, x: Tensor) -> InceptionOutputs: + x = self._transform_input(x) + feat, x, aux = self._forward(x) + aux_defined = self.training and self.aux_logits + if torch.jit.is_scripting(): + if not aux_defined: + warnings.warn("Scripted Inception3 always returns Inception3 Tuple") + return feat, InceptionOutputs(x, aux) + else: + return feat, self.eager_outputs(x, aux) + + +class InceptionA(nn.Module): + + def __init__( + self, + in_channels: int, + pool_features: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionA, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) + + self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) + self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) + + self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) + + def _forward(self, x: Tensor) -> List[Tensor]: + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionB(nn.Module): + + def __init__( + self, + in_channels: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionB, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) + + def _forward(self, x: Tensor) -> List[Tensor]: + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionC(nn.Module): + + def __init__( + self, + in_channels: int, + channels_7x7: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionC, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) + 
+ c7 = channels_7x7 + self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) + + self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x: Tensor) -> List[Tensor]: + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionD(nn.Module): + + def __init__( + self, + in_channels: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionD, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) + + self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) + + def _forward(self, x: Tensor) -> List[Tensor]: + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + outputs = [branch3x3, branch7x7x3, branch_pool] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionE(nn.Module): + + def __init__( + self, + in_channels: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionE, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) + + self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) + self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) + self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) + self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def 
_forward(self, x: Tensor) -> List[Tensor]: + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__( + self, + in_channels: int, + num_classes: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionAux, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.conv0 = conv_block(in_channels, 128, kernel_size=1) + self.conv1 = conv_block(128, 768, kernel_size=5) + self.conv1.stddev = 0.01 # type: ignore[assignment] + self.fc = nn.Linear(768, num_classes) + self.fc.stddev = 0.001 # type: ignore[assignment] + + def forward(self, x: Tensor) -> Tensor: + # N x 768 x 17 x 17 + x = F.avg_pool2d(x, kernel_size=5, stride=3) + # N x 768 x 5 x 5 + x = self.conv0(x) + # N x 128 x 5 x 5 + x = self.conv1(x) + # N x 768 x 1 x 1 + # Adaptive average pooling + x = F.adaptive_avg_pool2d(x, (1, 1)) + # N x 768 x 1 x 1 + x = torch.flatten(x, 1) + # N x 768 + x = self.fc(x) + # N x 1000 + return x + + +class BasicConv2d(nn.Module): + + def __init__( + self, + in_channels: int, + out_channels: int, + **kwargs: Any + ) -> None: + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x: Tensor) -> Tensor: + x = self.conv(x) + x = self.bn(x) + return F.relu(x, inplace=True) diff --git a/pytorch_svgrender/libs/modules/vision/vgg.py b/pytorch_svgrender/libs/modules/vision/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..a5e06c1dee136b1c01cc891f0bbed3da5553e066 --- /dev/null +++ b/pytorch_svgrender/libs/modules/vision/vgg.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: + +from typing import Union, List, Dict, Any, cast + +import torch +import torch.nn as nn +from torch.utils.model_zoo import load_url as load_state_dict_from_url + +__all__ = [ + 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + 'vgg19_bn', 'vgg19', +] + +model_urls = { + 'vgg11': 'https://download.pytorch.org/models/vgg11-8a719046.pth', + 'vgg13': 'https://download.pytorch.org/models/vgg13-19584684.pth', + 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', + 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth', + 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth', + 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth', + 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth', + 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth', +} + + +class VGG(nn.Module): + + def __init__( + self, + features: nn.Module, + num_classes: int = 1000, + init_weights: bool = True + ) -> None: + super(VGG, self).__init__() + self.features = features + self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + if init_weights: + self._initialize_weights() + + def forward(self, x: torch.Tensor): + feat = self.features(x) + x = self.avgpool(feat) + x = torch.flatten(x, 1) + x = self.classifier(x) + return feat, x + + def _initialize_weights(self) -> None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + +def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential: + layers: List[nn.Module] = [] + in_channels = 3 + for v in cfg: + if v == 'M': + layers += [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + v = cast(int, v) + conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = v + return nn.Sequential(*layers) + + +cfgs: Dict[str, List[Union[str, int]]] = { + 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG: + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls[arch], + progress=progress) + model.load_state_dict(state_dict) + return model + + +def vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") from + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. 
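+
+    Note that, unlike torchvision's reference implementation, ``forward`` here
+    returns a ``(feat, logits)`` tuple, where ``feat`` is the output of the
+    convolutional trunk before average pooling. A rough, illustrative sketch::
+
+        feat, logits = vgg11(pretrained=True).eval()(torch.randn(1, 3, 224, 224))
+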
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs) + + +def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs) + + +def vgg13(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs) + + +def vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs) + + +def vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs) + + +def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs) + + +def vgg19(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration "E") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs) + + +def vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration 'E') with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs) diff --git a/pytorch_svgrender/libs/modules/visual/__init__.py b/pytorch_svgrender/libs/modules/visual/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad761f2f5443eb41b15afc4116a66ecdfa9d918 --- /dev/null +++ b/pytorch_svgrender/libs/modules/visual/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: diff --git a/pytorch_svgrender/libs/modules/visual/imshow.py b/pytorch_svgrender/libs/modules/visual/imshow.py new file mode 100644 index 0000000000000000000000000000000000000000..896670001d6c3a40aab3b0d66991e21f64fe8f21 --- /dev/null +++ b/pytorch_svgrender/libs/modules/visual/imshow.py @@ -0,0 +1,177 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import pathlib +from typing import Union, List, Text, BinaryIO, AnyStr + +import matplotlib.pyplot as plt +import torch +import torchvision.transforms as transforms +from torchvision.utils import make_grid + +__all__ = [ + 'sample2pil_transforms', + 'pt2numpy_transforms', + 'plt_pt_img', + 'save_grid_images_and_labels', + 'save_grid_images_and_captions', +] + +# generate sample to PIL images +sample2pil_transforms = transforms.Compose([ + # unnormalizing to [0,1] + transforms.Lambda(lambda t: torch.clamp((t + 1) / 2, min=0.0, max=1.0)), + # Add 0.5 after unnormalizing to [0, 255] + transforms.Lambda(lambda t: torch.clamp(t * 255. + 0.5, min=0, max=255)), + # CHW to HWC + transforms.Lambda(lambda t: t.permute(1, 2, 0)), + # to numpy ndarray, dtype int8 + transforms.Lambda(lambda t: t.to('cpu', torch.uint8).numpy()), + # Converts a numpy ndarray of shape H x W x C to a PIL Image + transforms.ToPILImage(), +]) + +# generate sample to PIL images +pt2numpy_transforms = transforms.Compose([ + # Add 0.5 after unnormalizing to [0, 255] + transforms.Lambda(lambda t: torch.clamp(t * 255. + 0.5, min=0, max=255)), + # CHW to HWC + transforms.Lambda(lambda t: t.permute(1, 2, 0)), + # to numpy ndarray, dtype int8 + transforms.Lambda(lambda t: t.to('cpu', torch.uint8).numpy()), +]) + + +def plt_pt_img( + pt_img: torch.Tensor, + save_path: AnyStr = None, + title: AnyStr = None, + dpi: int = 300 +): + grid = make_grid(pt_img, normalize=True, pad_value=2) + ndarr = pt2numpy_transforms(grid) + plt.imshow(ndarr) + plt.axis("off") + plt.tight_layout() + if title is not None: + plt.title(f"{title}") + + plt.show() + if save_path is not None: + plt.savefig(save_path, dpi=dpi) + + plt.close() + + +@torch.no_grad() +def save_grid_images_and_labels( + images: Union[torch.Tensor, List[torch.Tensor]], + probs: Union[torch.Tensor, List[torch.Tensor]], + labels: Union[torch.Tensor, List[torch.Tensor]], + classes: Union[torch.Tensor, List[torch.Tensor]], + fp: Union[Text, pathlib.Path, BinaryIO], + nrow: int = 4, + normalize: bool = True +) -> None: + """Save a given Tensor into an image file. 
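+
+    Args:
+        images: images to plot, one per subplot.
+        probs: predicted per-class probabilities for each image.
+        labels: ground-truth label index for each image.
+        classes: mapping from label index to class name.
+        fp: file path to save the figure to.
+        nrow (int, optional): maximum number of images per row. Defaults to 4.
+        normalize (bool, optional): whether to map samples back to PIL images
+            via ``sample2pil_transforms`` before plotting. Defaults to True.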
+ """ + num_images = len(images) + num_rows, num_cols = _get_subplot_shape(num_images, nrow) + + fig = plt.figure(figsize=(25, 20)) + + for i in range(num_images): + ax = fig.add_subplot(num_rows, num_cols, i + 1) + + image, true_label, prob = images[i], labels[i], probs[i] + + true_prob = prob[true_label] + incorrect_prob, incorrect_label = torch.max(prob, dim=0) + true_class = classes[true_label] + + incorrect_class = classes[incorrect_label] + + if normalize: + image = sample2pil_transforms(image) + + ax.imshow(image) + title = f'true label: {true_class} ({true_prob:.3f})\n ' \ + f'pred label: {incorrect_class} ({incorrect_prob:.3f})' + ax.set_title(title, fontsize=20) + ax.axis('off') + + fig.subplots_adjust(hspace=0.3) + + plt.savefig(fp) + plt.close() + + +@torch.no_grad() +def save_grid_images_and_captions( + images: Union[torch.Tensor, List[torch.Tensor]], + captions: List, + fp: Union[Text, pathlib.Path, BinaryIO], + nrow: int = 4, + normalize: bool = True +) -> None: + """ + Save a grid of images and their captions into an image file. + + Args: + images (Union[torch.Tensor, List[torch.Tensor]]): A list of images to display. + captions (List): A list of captions for each image. + fp (Union[Text, pathlib.Path, BinaryIO]): The file path to save the image to. + nrow (int, optional): The number of images to display in each row. Defaults to 4. + normalize (bool, optional): Whether to normalize the image or not. Defaults to False. + """ + num_images = len(images) + num_rows, num_cols = _get_subplot_shape(num_images, nrow) + + fig = plt.figure(figsize=(25, 20)) + + for i in range(num_images): + ax = fig.add_subplot(num_rows, num_cols, i + 1) + image, caption = images[i], captions[i] + + if normalize: + image = sample2pil_transforms(image) + + ax.imshow(image) + title = f'"{caption}"' if num_images > 1 else f'"{captions}"' + title = _insert_newline(title) + ax.set_title(title, fontsize=20) + ax.axis('off') + + fig.subplots_adjust(hspace=0.3) + + plt.savefig(fp) + plt.close() + + +def _get_subplot_shape(num_images, nrow): + """ + Calculate the number of rows and columns required to display images in a grid. + + Args: + num_images (int): The total number of images to display. + nrow (int): The maximum number of images to display in each row. + + Returns: + Tuple[int, int]: The number of rows and columns required to display images in a grid. + """ + num_cols = min(num_images, nrow) + num_rows = (num_images + num_cols - 1) // num_cols + return num_rows, num_cols + + +def _insert_newline(string, point=9): + # split by blank + words = string.split() + if len(words) <= point: + return string + + word_chunks = [words[i:i + point] for i in range(0, len(words), point)] + new_string = "\n".join(" ".join(chunk) for chunk in word_chunks) + return new_string diff --git a/pytorch_svgrender/libs/solver/__init__.py b/pytorch_svgrender/libs/solver/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad761f2f5443eb41b15afc4116a66ecdfa9d918 --- /dev/null +++ b/pytorch_svgrender/libs/solver/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: diff --git a/pytorch_svgrender/libs/solver/lr_scheduler.py b/pytorch_svgrender/libs/solver/lr_scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..94ac5a2e44a20fa63d8ea4fe85f0eee7404ace9a --- /dev/null +++ b/pytorch_svgrender/libs/solver/lr_scheduler.py @@ -0,0 +1,350 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. 
team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch optimization for diffusion models.""" + +import math +from enum import Enum +from typing import Optional, Union + +from torch.optim import Optimizer +from torch.optim.lr_scheduler import LambdaLR + + +class SchedulerType(Enum): + LINEAR = "linear" + COSINE = "cosine" + COSINE_WITH_RESTARTS = "cosine_with_restarts" + POLYNOMIAL = "polynomial" + CONSTANT = "constant" + CONSTANT_WITH_WARMUP = "constant_with_warmup" + PIECEWISE_CONSTANT = "piecewise_constant" + + +def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1): + """ + Create a schedule with a constant learning rate, using the learning rate set in optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) + + +def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1): + """ + Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate + increases linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + def lr_lambda(current_step: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1.0, num_warmup_steps)) + return 1.0 + + return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) + + +def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1): + """ + Create a schedule with a constant learning rate, using the learning rate set in optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + step_rules (`string`): + The rules for the learning rate. ex: rule_steps="1:10,0.1:20,0.01:30,0.005" it means that the learning rate + if multiple 1 for the first 10 steps, mutiple 0.1 for the next 20 steps, multiple 0.01 for the next 30 + steps and multiple 0.005 for the other steps. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
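+
+    Example (illustrative; the resulting multiplier is 1.0 for steps 0-9,
+    0.1 for steps 10-29, 0.01 for steps 30-59 and 0.005 afterwards):
+
+        scheduler = get_piecewise_constant_schedule(optimizer, "1:10,0.1:30,0.01:60,0.005")
+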
+ """ + + rules_dict = {} + rule_list = step_rules.split(",") + for rule_str in rule_list[:-1]: + value_str, steps_str = rule_str.split(":") + steps = int(steps_str) + value = float(value_str) + rules_dict[steps] = value + last_lr_multiple = float(rule_list[-1]) + + def create_rules_function(rules_dict, last_lr_multiple): + def rule_func(steps: int) -> float: + sorted_steps = sorted(rules_dict.keys()) + for i, sorted_step in enumerate(sorted_steps): + if steps < sorted_step: + return rules_dict[sorted_steps[i]] + return last_lr_multiple + + return rule_func + + rules_func = create_rules_function(rules_dict, last_lr_multiple) + + return LambdaLR(optimizer, rules_func, last_epoch=last_epoch) + + +def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): + """ + Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after + a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + def lr_lambda(current_step: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + return max( + 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) + ) + + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def get_cosine_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, + last_epoch: int = -1 +): + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + num_cycles (`float`, *optional*, defaults to 0.5): + The number of periods of the cosine function in a schedule (the default is to just decrease from the max + value to 0 following a half-cosine). + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
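As a quick numeric check of this schedule (the step counts here are illustrative only): with num_warmup_steps=100 and num_training_steps=1000, lr_lambda returns 50/100 = 0.5 at step 50 (linear warmup), 0.5 * (1 + cos(pi * 0.5)) = 0.5 at step 550 (halfway through the cosine decay with the default num_cycles=0.5), and 0.0 at step 1000.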
+ """ + + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) + + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def get_cosine_with_hard_restarts_schedule_with_warmup( + optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 +): + """ + Create a schedule with a learning rate that decreases following the values of the cosine function between the + initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases + linearly between 0 and the initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + num_cycles (`int`, *optional*, defaults to 1): + The number of hard restarts to use. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. + """ + + def lr_lambda(current_step): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) + if progress >= 1.0: + return 0.0 + return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) + + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +def get_polynomial_decay_schedule_with_warmup( + optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1 +): + """ + Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the + optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the + initial lr set in the optimizer. + + Args: + optimizer ([`~torch.optim.Optimizer`]): + The optimizer for which to schedule the learning rate. + num_warmup_steps (`int`): + The number of steps for the warmup phase. + num_training_steps (`int`): + The total number of training steps. + lr_end (`float`, *optional*, defaults to 1e-7): + The end LR. + power (`float`, *optional*, defaults to 1.0): + Power factor. + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. + + Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT + implementation at + https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 + + Return: + `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
+ + """ + + lr_init = optimizer.defaults["lr"] + if not (lr_init > lr_end): + raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})") + + def lr_lambda(current_step: int): + if current_step < num_warmup_steps: + return float(current_step) / float(max(1, num_warmup_steps)) + elif current_step > num_training_steps: + return lr_end / lr_init # as LambdaLR multiplies by lr_init + else: + lr_range = lr_init - lr_end + decay_steps = num_training_steps - num_warmup_steps + pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps + decay = lr_range * pct_remaining ** power + lr_end + return decay / lr_init # as LambdaLR multiplies by lr_init + + return LambdaLR(optimizer, lr_lambda, last_epoch) + + +TYPE_TO_SCHEDULER_FUNCTION = { + SchedulerType.LINEAR: get_linear_schedule_with_warmup, + SchedulerType.COSINE: get_cosine_schedule_with_warmup, + SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, + SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, + SchedulerType.CONSTANT: get_constant_schedule, + SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, + SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, +} + + +def get_scheduler( + name: Union[str, SchedulerType], + optimizer: Optimizer, + step_rules: Optional[str] = None, + num_warmup_steps: Optional[int] = None, + num_training_steps: Optional[int] = None, + num_cycles: int = 1, + power: float = 1.0, + last_epoch: int = -1, +): + """ + Unified API to get any scheduler from its name. + + Args: + name (`str` or `SchedulerType`): + The name of the scheduler to use. + optimizer (`torch.optim.Optimizer`): + The optimizer that will be used during training. + step_rules (`str`, *optional*): + A string representing the step rules to use. This is only used by the `PIECEWISE_CONSTANT` scheduler. + num_warmup_steps (`int`, *optional*): + The number of warmup steps to do. This is not required by all schedulers (hence the argument being + optional), the function will raise an error if it's unset and the scheduler type requires it. + num_training_steps (`int``, *optional*): + The number of training steps to do. This is not required by all schedulers (hence the argument being + optional), the function will raise an error if it's unset and the scheduler type requires it. + num_cycles (`int`, *optional*): + The number of hard restarts used in `COSINE_WITH_RESTARTS` scheduler. + power (`float`, *optional*, defaults to 1.0): + Power factor. See `POLYNOMIAL` scheduler + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. 
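A minimal usage sketch of this factory (the scheduler name, model, and step counts below are placeholders chosen for illustration, not values prescribed by the repository):

import torch
from pytorch_svgrender.libs.solver.lr_scheduler import get_scheduler

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)

for _ in range(1000):
    optimizer.step()       # normally preceded by a backward pass
    lr_scheduler.step()    # advances the warmup/cosine multiplier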
+ """ + name = SchedulerType(name) + schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] + if name == SchedulerType.CONSTANT: + return schedule_func(optimizer, last_epoch=last_epoch) + + if name == SchedulerType.PIECEWISE_CONSTANT: + return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch) + + # All other schedulers require `num_warmup_steps` + if num_warmup_steps is None: + raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") + + if name == SchedulerType.CONSTANT_WITH_WARMUP: + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch) + + # All other schedulers require `num_training_steps` + if num_training_steps is None: + raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") + + if name == SchedulerType.COSINE_WITH_RESTARTS: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles, + last_epoch=last_epoch, + ) + + if name == SchedulerType.POLYNOMIAL: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + power=power, + last_epoch=last_epoch, + ) + + return schedule_func( + optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch + ) diff --git a/pytorch_svgrender/libs/solver/optim.py b/pytorch_svgrender/libs/solver/optim.py new file mode 100644 index 0000000000000000000000000000000000000000..e154c31404d38182aba2ad995e07c6b127195a28 --- /dev/null +++ b/pytorch_svgrender/libs/solver/optim.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: SVGDreamer - optim +# Copyright (c) 2023, XiMing Xing. +# License: MIT License +from functools import partial + +import torch +from omegaconf import DictConfig + + +def get_optimizer(optimizer_name, parameters, lr=None, config: DictConfig = None): + param_dict = {} + if optimizer_name == "adam": + optimizer = partial(torch.optim.Adam, params=parameters) + if lr is not None: + optimizer = partial(torch.optim.Adam, params=parameters, lr=lr) + if config.get('betas'): + param_dict['betas'] = config.betas + if config.get('weight_decay'): + param_dict['weight_decay'] = config.weight_decay + if config.get('eps'): + param_dict['eps'] = config.eps + elif optimizer_name == "adamW": + optimizer = partial(torch.optim.AdamW, params=parameters) + if lr is not None: + optimizer = partial(torch.optim.AdamW, params=parameters, lr=lr) + if config.get('betas'): + param_dict['betas'] = config.betas + if config.get('weight_decay'): + param_dict['weight_decay'] = config.weight_decay + if config.get('eps'): + param_dict['eps'] = config.eps + elif optimizer_name == "radam": + optimizer = partial(torch.optim.RAdam, params=parameters) + if lr is not None: + optimizer = partial(torch.optim.RAdam, params=parameters, lr=lr) + if config.get('betas'): + param_dict['betas'] = config.betas + if config.get('weight_decay'): + param_dict['weight_decay'] = config.weight_decay + elif optimizer_name == "sgd": + optimizer = partial(torch.optim.SGD, params=parameters) + if lr is not None: + optimizer = partial(torch.optim.SGD, params=parameters, lr=lr) + if config.get('momentum'): + param_dict['momentum'] = config.momentum + if config.get('weight_decay'): + param_dict['weight_decay'] = config.weight_decay + if config.get('nesterov'): + param_dict['nesterov'] = config.nesterov + else: + raise NotImplementedError(f"Optimizer {optimizer_name} not implemented.") + 
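A usage sketch for get_optimizer as defined here (the OmegaConf fields and values are illustrative assumptions, not defaults mandated by the repository):

import torch
from omegaconf import OmegaConf
from pytorch_svgrender.libs.solver.optim import get_optimizer

model = torch.nn.Linear(8, 2)
optim_cfg = OmegaConf.create({"betas": [0.9, 0.999], "weight_decay": 1e-4})
optimizer = get_optimizer("adamW", model.parameters(), lr=1e-3, config=optim_cfg)
# only the keys present in the config (betas, weight_decay) are forwarded to torch.optim.AdamW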
+ if len(param_dict.keys()) > 0: + return optimizer(**param_dict) + else: + return optimizer() diff --git a/pytorch_svgrender/libs/utils/__init__.py b/pytorch_svgrender/libs/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..854865eb7334af6aef8017506126341d93663a71 --- /dev/null +++ b/pytorch_svgrender/libs/utils/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: +from . import lazy + +# __getattr__, __dir__, __all__ = lazy.attach( +# __name__, +# submodules={}, +# submod_attrs={ +# 'misc': ['identity', 'exists', 'default', 'has_int_squareroot', 'sum_params', 'cycle', 'num_to_groups', +# 'extract', 'normalize', 'unnormalize'], +# 'tqdm': ['tqdm_decorator'], +# 'lazy': ['load'] +# } +# ) + +from .misc import ( + identity, + exists, + default, + has_int_squareroot, + sum_params, + cycle, + num_to_groups, + extract, + normalize, + unnormalize +) +from .tqdm import tqdm_decorator diff --git a/pytorch_svgrender/libs/utils/lazy.py b/pytorch_svgrender/libs/utils/lazy.py new file mode 100644 index 0000000000000000000000000000000000000000..170179a31867454546db3c7cee3f733650cd8d34 --- /dev/null +++ b/pytorch_svgrender/libs/utils/lazy.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import importlib +import importlib.util +import os +import sys + + +def attach(package_name, submodules=None, submod_attrs=None): + """Attach lazily loaded submodules, functions, or other attributes. + + Typically, modules import submodules and attributes as follows:: + + import mysubmodule + import anothersubmodule + + from .foo import someattr + + The idea is to replace a package's `__getattr__`, `__dir__`, and + `__all__`, such that all imports work exactly the way they did + before, except that they are only imported when used. + + The typical way to call this function, replacing the above imports, is:: + + __getattr__, __lazy_dir__, __all__ = lazy.attach( + __name__, + ['mysubmodule', 'anothersubmodule'], + {'foo': 'someattr'} + ) + + This functionality requires Python 3.7 or higher. + + Parameters + ---------- + package_name : str + Typically use ``__name__``. + submodules : set + List of submodules to attach. + submod_attrs : dict + Dictionary of submodule -> list of attributes / functions. + These attributes are imported as they are used. + + Returns + ------- + __getattr__, __dir__, __all__ + + """ + if submod_attrs is None: + submod_attrs = {} + + if submodules is None: + submodules = set() + else: + submodules = set(submodules) + + attr_to_modules = { + attr: mod for mod, attrs in submod_attrs.items() for attr in attrs + } + + __all__ = list(submodules | attr_to_modules.keys()) + + def __getattr__(name): + if name in submodules: + return importlib.import_module(f'{package_name}.{name}') + elif name in attr_to_modules: + submod = importlib.import_module( + f'{package_name}.{attr_to_modules[name]}' + ) + return getattr(submod, name) + else: + raise AttributeError(f'No {package_name} attribute {name}') + + def __dir__(): + return __all__ + + eager_import = os.environ.get('EAGER_IMPORT', '') + if eager_import not in ['', '0', 'false']: + for attr in set(attr_to_modules.keys()) | submodules: + __getattr__(attr) + + return __getattr__, __dir__, list(__all__) + + +def load(fullname): + """Return a lazily imported proxy for a module. 
+ + We often see the following pattern:: + + def myfunc(): + import scipy as sp + sp.argmin(...) + .... + + This is to prevent a module, in this case `scipy`, from being + imported at function definition time, since that can be slow. + + This function provides a proxy module that, upon access, imports + the actual module. So the idiom equivalent to the above example is:: + + sp = lazy.load("scipy") + + def myfunc(): + sp.argmin(...) + .... + + The initial import time is fast because the actual import is delayed + until the first attribute is requested. The overall import time may + decrease as well for users that don't make use of large portions + of the library. + + Parameters + ---------- + fullname : str + The full name of the module or submodule to import. For example:: + + sp = lazy.load('scipy') # import scipy as sp + spla = lazy.load('scipy.linalg') # import scipy.linalg as spla + + Returns + ------- + pm : importlib.util._LazyModule + Proxy module. Can be used like any regularly imported module. + Actual loading of the module occurs upon first attribute request. + + """ + try: + return sys.modules[fullname] + except KeyError: + pass + + spec = importlib.util.find_spec(fullname) + if spec is None: + raise ModuleNotFoundError(f"No module name '{fullname}'") + + module = importlib.util.module_from_spec(spec) + sys.modules[fullname] = module + + loader = importlib.util.LazyLoader(spec.loader) + loader.exec_module(module) + + return module diff --git a/pytorch_svgrender/libs/utils/logging.py b/pytorch_svgrender/libs/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..dd9828d4ad6b640cc0dd08583b4b762195ef1d96 --- /dev/null +++ b/pytorch_svgrender/libs/utils/logging.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import os +import sys +import errno + + +def get_logger(logs_dir: str, file_name: str = "log.txt"): + logger = PrintLogger(os.path.join(logs_dir, file_name)) + sys.stdout = logger # record all python print + return logger + + +class PrintLogger(object): + + def __init__(self, fpath=None): + """ + python standard input/output records + """ + self.console = sys.stdout + self.file = None + if fpath is not None: + mkdir_if_missing(os.path.dirname(fpath)) + self.file = open(fpath, 'w') + + def __del__(self): + self.close() + + def __enter__(self): + pass + + def __exit__(self, *args): + self.close() + + def write(self, msg): + self.console.write(msg) + if self.file is not None: + self.file.write(msg) + + def write_in(self, msg): + """write in log only, not console""" + if self.file is not None: + self.file.write(msg) + + def flush(self): + self.console.flush() + if self.file is not None: + self.file.flush() + os.fsync(self.file.fileno()) + + def close(self): + self.console.close() + if self.file is not None: + self.file.close() + + +def mkdir_if_missing(dir_path): + try: + os.makedirs(dir_path) + except OSError as e: + if e.errno != errno.EEXIST: + raise diff --git a/pytorch_svgrender/libs/utils/meter.py b/pytorch_svgrender/libs/utils/meter.py new file mode 100644 index 0000000000000000000000000000000000000000..bd32dfd7fa1d0193dcd914e4043b9a8d6b127ba5 --- /dev/null +++ b/pytorch_svgrender/libs/utils/meter.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
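The load helper above can be exercised as follows (numpy is used purely as an example target module; any importable module works):

import pytorch_svgrender.libs.utils.lazy as lazy

np = lazy.load("numpy")   # returns a lazy proxy; nothing is imported yet
print(np.pi)              # the real import happens on this first attribute access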
+# Author: XiMing Xing +# Description: + +from enum import Enum + +import torch +import torch.distributed as dist + + +class Summary(Enum): + NONE = 0 + AVERAGE = 1 + SUM = 2 + COUNT = 3 + + +class AverageMeter(object): + """Computes and stores the average and current value""" + + def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE): + self.name = name + self.fmt = fmt + self.summary_type = summary_type + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + def all_reduce(self): + if torch.cuda.is_available(): + device = torch.device("cuda") + elif torch.backends.mps.is_available(): + device = torch.device("mps") + else: + device = torch.device("cpu") + + total = torch.tensor([self.sum, self.count], dtype=torch.float32, device=device) + dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False) + self.sum, self.count = total.tolist() + self.avg = self.sum / self.count + + def __str__(self): + fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' + return fmtstr.format(**self.__dict__) + + def summary(self): + fmtstr = '' + if self.summary_type is Summary.NONE: + fmtstr = '' + elif self.summary_type is Summary.AVERAGE: + fmtstr = '{name} {avg:.3f}' + elif self.summary_type is Summary.SUM: + fmtstr = '{name} {sum:.3f}' + elif self.summary_type is Summary.COUNT: + fmtstr = '{name} {count:.3f}' + else: + raise ValueError('invalid summary type %r' % self.summary_type) + + return fmtstr.format(**self.__dict__) diff --git a/pytorch_svgrender/libs/utils/misc.py b/pytorch_svgrender/libs/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..352127dc4fbc9932249fbbbc89d4731eaca6f25a --- /dev/null +++ b/pytorch_svgrender/libs/utils/misc.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import math + +import torch + + +def identity(t, *args, **kwargs): + """return t""" + return t + + +def exists(x): + """whether x is None or not""" + return x is not None + + +def default(val, d): + """ternary judgment: val != None ? 
val : d""" + if exists(val): + return val + return d() if callable(d) else d + + +def has_int_squareroot(num): + return (math.sqrt(num) ** 2) == num + + +def num_to_groups(num, divisor): + groups = num // divisor + remainder = num % divisor + arr = [divisor] * groups + if remainder > 0: + arr.append(remainder) + return arr + + +################################################################################# +# Model Utils # +################################################################################# + +def sum_params(model: torch.nn.Module, eps: float = 1e6): + return sum(p.numel() for p in model.parameters()) / eps + + +################################################################################# +# DataLoader Utils # +################################################################################# + +def cycle(dl): + while True: + for data in dl: + yield data + + +################################################################################# +# Diffusion Model Utils # +################################################################################# + +def extract(a, t, x_shape): + b, *_ = t.shape + assert x_shape[0] == b + out = a.gather(-1, t) # 1-D tensor, shape: (b,) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) # shape: [b, 1, 1, 1] + + +def unnormalize(x): + """unnormalize_to_zero_to_one""" + x = (x + 1) * 0.5 # Map the data interval to [0, 1] + return torch.clamp(x, 0.0, 1.0) + + +def normalize(x): + """normalize_to_neg_one_to_one""" + x = x * 2 - 1 # Map the data interval to [-1, 1] + return torch.clamp(x, -1.0, 1.0) diff --git a/pytorch_svgrender/libs/utils/model_summary.py b/pytorch_svgrender/libs/utils/model_summary.py new file mode 100644 index 0000000000000000000000000000000000000000..afd33987128d074d9e88dfa8aabed29abea1fcfb --- /dev/null +++ b/pytorch_svgrender/libs/utils/model_summary.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
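A small sketch of the diffusion and batching helpers collected in misc.py above (shapes and values are picked only for illustration):

import torch
from pytorch_svgrender.libs.utils.misc import extract, unnormalize, num_to_groups

alphas = torch.linspace(0.9, 0.1, steps=1000)        # e.g. a per-timestep schedule
t = torch.tensor([10, 500])                          # one timestep per sample in a batch of 2
coeff = extract(alphas, t, x_shape=(2, 3, 64, 64))   # -> shape (2, 1, 1, 1), broadcastable over images
print(num_to_groups(10, 4))                          # [4, 4, 2]
print(unnormalize(torch.tensor([-1.0, 0.0, 1.0])))   # tensor([0.0000, 0.5000, 1.0000])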
+# Author: XiMing Xing +# Description: + +import sys +from collections import OrderedDict + +import numpy as np +import torch + +layer_modules = (torch.nn.MultiheadAttention,) + + +def summary(model, input_data=None, input_data_args=None, input_shape=None, input_dtype=torch.FloatTensor, + batch_size=-1, + *args, **kwargs): + """ + give example input data as least one way like below: + ① input_data ---> model.forward(input_data) + ② input_data_args ---> model.forward(*input_data_args) + ③ input_shape & input_dtype ---> model.forward(*[torch.rand(2, *size).type(input_dtype) for size in input_shape]) + """ + + hooks = [] + summary = OrderedDict() + + def register_hook(module): + def hook(module, inputs, outputs): + + class_name = str(module.__class__).split(".")[-1].split("'")[0] + module_idx = len(summary) + + key = "%s-%i" % (class_name, module_idx + 1) + + info = OrderedDict() + info["id"] = id(module) + if isinstance(outputs, (list, tuple)): + try: + info["out"] = [batch_size] + list(outputs[0].size())[1:] + except AttributeError: + # pack_padded_seq and pad_packed_seq store feature into data attribute + info["out"] = [batch_size] + list(outputs[0].data.size())[1:] + else: + info["out"] = [batch_size] + list(outputs.size())[1:] + + info["params_nt"], info["params"] = 0, 0 + for name, param in module.named_parameters(): + info["params"] += param.nelement() * param.requires_grad + info["params_nt"] += param.nelement() * (not param.requires_grad) + + summary[key] = info + + # ignore Sequential and ModuleList and other containers + if isinstance(module, layer_modules) or not module._modules: + hooks.append(module.register_forward_hook(hook)) + + model.apply(register_hook) + + # multiple inputs to the network + if isinstance(input_shape, tuple): + input_shape = [input_shape] + + if input_data is not None: + x = [input_data] + elif input_shape is not None: + # batch_size of 2 for batchnorm + x = [torch.rand(2, *size).type(input_dtype) for size in input_shape] + elif input_data_args is not None: + x = input_data_args + else: + x = [] + try: + with torch.no_grad(): + model(*x) if not (kwargs or args) else model(*x, *args, **kwargs) + except Exception: + # This can be usefull for debugging + print("Failed to run summary...") + raise + finally: + for hook in hooks: + hook.remove() + summary_logs = [] + summary_logs.append("--------------------------------------------------------------------------") + line_new = "{:<30} {:>20} {:>20}".format("Layer (type)", "Output Shape", "Param #") + summary_logs.append(line_new) + summary_logs.append("==========================================================================") + total_params = 0 + total_output = 0 + trainable_params = 0 + for layer in summary: + # layer, output_shape, params + line_new = "{:<30} {:>20} {:>20}".format( + layer, + str(summary[layer]["out"]), + "{0:,}".format(summary[layer]["params"] + summary[layer]["params_nt"]) + ) + total_params += (summary[layer]["params"] + summary[layer]["params_nt"]) + total_output += np.prod(summary[layer]["out"]) + trainable_params += summary[layer]["params"] + summary_logs.append(line_new) + + # assume 4 bytes/number + if input_data is not None: + total_input_size = abs(sys.getsizeof(input_data) / (1024 ** 2.)) + elif input_shape is not None: + total_input_size = abs(np.prod(input_shape) * batch_size * 4. / (1024 ** 2.)) + else: + total_input_size = 0.0 + total_output_size = abs(2. * total_output * 4. / (1024 ** 2.)) # x2 for gradients + total_params_size = abs(total_params * 4. 
/ (1024 ** 2.)) + total_size = total_params_size + total_output_size + total_input_size + + summary_logs.append("==========================================================================") + summary_logs.append("Total params: {0:,}".format(total_params)) + summary_logs.append("Trainable params: {0:,}".format(trainable_params)) + summary_logs.append("Non-trainable params: {0:,}".format(total_params - trainable_params)) + summary_logs.append("--------------------------------------------------------------------------") + summary_logs.append("Input size (MB): %0.6f" % total_input_size) + summary_logs.append("Forward/backward pass size (MB): %0.6f" % total_output_size) + summary_logs.append("Params size (MB): %0.6f" % total_params_size) + summary_logs.append("Estimated Total Size (MB): %0.6f" % total_size) + summary_logs.append("--------------------------------------------------------------------------") + + summary_info = "\n".join(summary_logs) + + print(summary_info) + return summary_info diff --git a/pytorch_svgrender/libs/utils/tqdm.py b/pytorch_svgrender/libs/utils/tqdm.py new file mode 100644 index 0000000000000000000000000000000000000000..d0a88005fabf467a16f9a5c75ff81cdb91326703 --- /dev/null +++ b/pytorch_svgrender/libs/utils/tqdm.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from typing import Callable +from tqdm.auto import tqdm + + +def tqdm_decorator(func: Callable): + """A decorator function called tqdm_decorator that takes a function as an argument and + returns a new function that wraps the input function with a tqdm progress bar. + + Noting: **The input function is assumed to have an object self as its first argument**, which contains a step attribute, + an args attribute with a train_num_steps attribute, and an accelerator attribute with an is_main_process attribute. + + Args: + func: tqdm_decorator + + Returns: + a new function that wraps the input function with a tqdm progress bar. + """ + + def wrapper(*args, **kwargs): + with tqdm(initial=args[0].step, + total=args[0].args.train_num_steps, + disable=not args[0].accelerator.is_main_process) as pbar: + func(*args, **kwargs, pbar=pbar) + + return wrapper diff --git a/pytorch_svgrender/painter/__init__.py b/pytorch_svgrender/painter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5671b84f044ee0876b561c425d0b76643fee9d51 --- /dev/null +++ b/pytorch_svgrender/painter/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Description: diff --git a/pytorch_svgrender/painter/clipascene/__init__.py b/pytorch_svgrender/painter/clipascene/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2b9842243c9b6e693c300751ed5a3f6c0cc0a673 --- /dev/null +++ b/pytorch_svgrender/painter/clipascene/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
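The summary helper above accepts any of the three input modes listed in its docstring; a minimal sketch using input_shape (the model and sizes are arbitrary examples):

import torch
from pytorch_svgrender.libs.utils.model_summary import summary

net = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.ReLU(), torch.nn.Linear(32, 4))
summary(net, input_shape=[(16,)])   # prints a per-layer table plus parameter and memory totals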
+# Author: XiMing Xing +# Description: + +from .loss import Loss +from .painter_params import Painter, PainterOptimizer + +__all__ = [ + 'Painter', 'PainterOptimizer', + 'Loss' +] diff --git a/pytorch_svgrender/painter/clipascene/lama_utils.py b/pytorch_svgrender/painter/clipascene/lama_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..38288fff37eb721f3cf4d6481f0506b059cdfd20 --- /dev/null +++ b/pytorch_svgrender/painter/clipascene/lama_utils.py @@ -0,0 +1,60 @@ +from pathlib import Path + +import cv2 +import numpy as np +import torch +import tqdm +import yaml +from lama.saicinpainting.evaluation.refinement import refine_predict +from lama.saicinpainting.evaluation.utils import move_to_device +from lama.saicinpainting.training.data.datasets import make_default_val_dataset +from lama.saicinpainting.training.trainers import load_checkpoint +from omegaconf import OmegaConf +from torch.utils.data._utils.collate import default_collate + + +def apply_inpaint(scene_path, background_path, device): + conf = OmegaConf.load('lama/configs/prediction/default.yaml') + model_path = Path("lama/big-lama") + train_config_path = model_path / 'config.yaml' + with open(train_config_path, 'r') as f: + train_config = OmegaConf.create(yaml.safe_load(f)) + + train_config.training_model.predict_only = True + train_config.visualizer.kind = 'noop' + + out_ext = conf.get('out_ext', '.png') + + checkpoint_path = model_path / 'models' / conf.model.checkpoint + model = load_checkpoint(train_config, checkpoint_path, strict=False, map_location='cpu') + model.freeze() + if not conf.get('refine', False): + model.to(device) + + dataset = make_default_val_dataset(scene_path, **conf.dataset) + for img_i in tqdm.trange(len(dataset)): + mask_fname = Path(dataset.mask_filenames[img_i]) + relative_fname = mask_fname.relative_to(scene_path).with_suffix(out_ext) + cur_out_fname = background_path / relative_fname + cur_out_fname.parent.mkdir(parents=True, exist_ok=True) + batch = default_collate([dataset[img_i]]) + if conf.get('refine', False): + assert 'unpad_to_size' in batch, "Unpadded size is required for the refinement" + # image unpadding is taken care of in the refiner, so that output image + # is same size as the input image + cur_res = refine_predict(batch, model, **conf.refiner) + cur_res = cur_res[0].permute(1, 2, 0).detach().cpu().numpy() + else: + with torch.no_grad(): + batch = move_to_device(batch, device) + batch['mask'] = (batch['mask'] > 0) * 1 + batch = model(batch) + cur_res = batch[conf.out_key][0].permute(1, 2, 0).detach().cpu().numpy() + unpad_to_size = batch.get('unpad_to_size', None) + if unpad_to_size is not None: + orig_height, orig_width = unpad_to_size + cur_res = cur_res[:orig_height, :orig_width] + + cur_res = np.clip(cur_res * 255, 0, 255).astype('uint8') + cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR) + cv2.imwrite(cur_out_fname.as_posix(), cur_res) diff --git a/pytorch_svgrender/painter/clipascene/loss.py b/pytorch_svgrender/painter/clipascene/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..cb78d5fe8c090b6730b3ab558f69bf77d2f21b23 --- /dev/null +++ b/pytorch_svgrender/painter/clipascene/loss.py @@ -0,0 +1,804 @@ +import collections +import re + +import clip +import torch +import torch.nn as nn +from torchvision import models, transforms + + +def compute_grad_norm_losses(losses_dict, model, points_mlp): + ''' + Balances multiple losses by weighting them inversly proportional + to their overall gradient contribution. 
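(For intuition, with purely hypothetical numbers: if two losses contribute average per-parameter gradient magnitudes of 0.9 and 0.1, the weights computed below are (1.0 - 0.9) / (1 * 1.0) = 0.1 and (1.0 - 0.1) / (1 * 1.0) = 0.9, so the loss with the smaller gradient footprint is up-weighted.)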
+ + Args: + losses: A dictionary of losses. + model: A PyTorch model. + Returns: + A dictionary of loss weights. + ''' + grad_norms = {} + for loss_name, loss in losses_dict.items(): + loss.backward(retain_graph=True) + grad_sum = sum([w.grad.abs().sum().item() for w in model.parameters() if w.grad is not None]) + num_elem = sum([w.numel() for w in model.parameters() if w.grad is not None]) + grad_norms[loss_name] = grad_sum / num_elem + model.zero_grad() + points_mlp.zero_grad() + + grad_norms_total = sum(grad_norms.values()) + + loss_weights = {} + for loss_name, loss in losses_dict.items(): + weight = (grad_norms_total - grad_norms[loss_name]) / ((len(losses_dict) - 1) * grad_norms_total) + loss_weights[loss_name] = weight + + return loss_weights + + +class Loss(nn.Module): + def __init__(self, args, mask=None, device="cpu"): + super(Loss, self).__init__() + self.args = args + self.percep_loss = args.percep_loss + self.device = device + + self.train_with_clip = args.train_with_clip + self.clip_weight = args.clip_weight + self.start_clip = args.start_clip + + self.clip_conv_loss = args.clip_conv_loss + self.clip_mask_loss = args.clip_mask_loss + self.clip_fc_loss_weight = args.clip_fc_loss_weight + self.clip_text_guide = args.clip_text_guide + self.width_optim = args.width_optim + self.width_loss_weight = args.width_loss_weight + self.ratio_loss = args.ratio_loss + if isinstance(args.clip_conv_layer_weights, str): + self.args.clip_conv_layer_weights = [ + float(item) for item in args.clip_conv_layer_weights.split(',') + ] + + self.losses_to_apply = self.get_losses_to_apply() + self.gradnorm = args.gradnorm + if args.gradnorm: + self.new_weights = {} + + self.loss_mapper = {} + if self.clip_conv_loss: + self.loss_mapper["clip_conv_loss"] = CLIPConvLoss(args, mask, device) + if self.clip_mask_loss: + self.loss_mapper["clip_mask_loss"] = CLIPmaskLoss(args, mask, device) + if self.width_optim: + self.loss_mapper["width_loss"] = WidthLoss(args, device) + if self.ratio_loss: + self.loss_mapper["ratio_loss"] = RatioLoss(args, device) + + def get_losses_to_apply(self): + losses_to_apply = [] + if self.percep_loss != "none": + losses_to_apply.append(self.percep_loss) + if self.train_with_clip and self.start_clip == 0: + losses_to_apply.append("clip") + if self.clip_conv_loss: + losses_to_apply.append("clip_conv_loss") + if self.clip_mask_loss: + losses_to_apply.append("clip_mask_loss") + if self.clip_text_guide: + losses_to_apply.append("clip_text") + if self.width_optim: + losses_to_apply.append("width_loss") + if self.ratio_loss: + losses_to_apply.append("ratio_loss") + return losses_to_apply + + def update_losses_to_apply(self, epoch, width_opt=None, mode="train"): + if "clip" not in self.losses_to_apply: + if self.train_with_clip: + if epoch > self.start_clip: + self.losses_to_apply.append("clip") + # for width loss switch + if width_opt is not None: + if self.width_optim and "width_loss" not in self.losses_to_apply and mode == "eval": + self.losses_to_apply.append("width_loss") + if width_opt and "width_loss" not in self.losses_to_apply: + self.losses_to_apply.append("width_loss") + if not width_opt and "width_loss" in self.losses_to_apply and mode == "train": + self.losses_to_apply.remove("width_loss") + + def forward(self, sketches, targets, epoch, widths=None, renderer=None, optimizer=None, mode="train", + width_opt=None): + loss = 0 + self.update_losses_to_apply(epoch, width_opt, mode) + + losses_dict = {} + loss_coeffs = {} + if self.width_optim: + loss_coeffs["width_loss"] = 
self.width_loss_weight + + clip_loss_names = [] + for loss_name in self.losses_to_apply: + if loss_name in ["clip_conv_loss", "clip_mask_loss"]: + conv_loss = self.loss_mapper[loss_name]( + sketches, targets, mode) + for layer in conv_loss.keys(): + if "normalization" in layer: + loss_coeffs[layer] = 0 # include layer 11 in gradnorm but not in final loss + losses_dict[layer] = conv_loss[layer] + else: + layer_w_index = int(re.findall(r'\d+', layer)[0]) # get the layer's number + losses_dict[layer] = conv_loss[layer] + loss_coeffs[layer] = self.args.clip_conv_layer_weights[layer_w_index] + clip_loss_names.append(layer) + elif loss_name == "width_loss": + losses_dict[loss_name] = self.loss_mapper[loss_name](widths, renderer.get_strokes_in_canvas_count()) + elif loss_name == "l2": + losses_dict[loss_name] = self.loss_mapper[loss_name]( + sketches, targets).mean() + elif loss_name == "ratio_loss": + continue + else: + losses_dict[loss_name] = self.loss_mapper[loss_name](sketches, targets, mode).mean() + + losses_dict_original = losses_dict.copy() + if self.gradnorm: + if mode == "train": + if self.width_optim: + self.new_weights = compute_grad_norm_losses(losses_dict, renderer.get_width_mlp(), + renderer.get_mlp()) + else: + self.new_weights = compute_grad_norm_losses(losses_dict, renderer.get_mlp(), renderer.get_mlp()) + # if mode is eval, take the norm wieghts of prev step, since we don't have grads here + + for key in losses_dict.keys(): + # losses_dict_copy[key] = losses_dict_copy[key] * self.new_weights[key] + losses_dict[key] = losses_dict[key] * self.new_weights[key] + + losses_dict_copy = {} # return the normalised losses before weighting + for k_ in losses_dict.keys(): + losses_dict_copy[k_] = losses_dict[k_].clone().detach() + for key in losses_dict.keys(): + # loss = loss + losses_dict[key] * loss_coeffs[key] + if loss_coeffs[key] == 0: + losses_dict[key] = losses_dict[key].detach() * loss_coeffs[key] + else: + losses_dict[key] = losses_dict[key] * loss_coeffs[key] + + if self.ratio_loss: + losses_dict["ratio_loss"] = self.loss_mapper["ratio_loss"](losses_dict_original, clip_loss_names).mean() + + losses_dict_original_detach = {} + for k_ in losses_dict_original.keys(): + losses_dict_original_detach[k_] = losses_dict_original[k_].clone().detach() + + return losses_dict, losses_dict_copy, losses_dict_original_detach + + +class CLIPLoss(torch.nn.Module): + def __init__(self, args, device): + super(CLIPLoss, self).__init__() + + self.args = args + self.device = device + self.model, clip_preprocess = clip.load( + 'ViT-B/32', self.device, jit=False) + self.model.eval() + self.preprocess = transforms.Compose( + [clip_preprocess.transforms[-1]]) # clip normalisation + self.NUM_AUGS = args.num_aug_clip + augemntations = [] + if "affine" in args.augemntations: + augemntations.append(transforms.RandomPerspective( + fill=0, p=1.0, distortion_scale=0.5)) + augemntations.append(transforms.RandomResizedCrop( + 224, scale=(0.8, 0.8), ratio=(1.0, 1.0))) + augemntations.append( + transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))) + self.augment_trans = transforms.Compose(augemntations) + + self.calc_target = True + self.include_target_in_aug = args.include_target_in_aug + self.counter = 0 + self.augment_both = args.augment_both + + def forward(self, sketches, targets, mode="train"): + if self.calc_target: + targets_ = self.preprocess(targets).to(self.device) + self.targets_features = self.model.encode_image(targets_).detach() + self.calc_target = False + 
+ if mode == "eval": + # for regular clip distance, no augmentations + with torch.no_grad(): + sketches = self.preprocess(sketches).to(self.device) + sketches_features = self.model.encode_image(sketches) + return 1. - torch.cosine_similarity(sketches_features, self.targets_features) + + loss_clip = 0 + sketch_augs = [] + img_augs = [] + for n in range(self.NUM_AUGS): + augmented_pair = self.augment_trans(torch.cat([sketches, targets])) + sketch_augs.append(augmented_pair[0].unsqueeze(0)) + + sketch_batch = torch.cat(sketch_augs) + sketch_features = self.model.encode_image(sketch_batch) + for n in range(self.NUM_AUGS): + loss_clip += (1. - torch.cosine_similarity( + sketch_features[n:n + 1], self.targets_features, dim=1)) + self.counter += 1 + return loss_clip + # return 1. - torch.cosine_similarity(sketches_features, self.targets_features) + + +class LPIPS(torch.nn.Module): + def __init__(self, pretrained=True, normalize=True, pre_relu=True, device=None): + """ + Args: + pre_relu(bool): if True, selects features **before** reLU activations + """ + super(LPIPS, self).__init__() + # VGG using perceptually-learned weights (LPIPS metric) + self.normalize = normalize + self.pretrained = pretrained + augemntations = [] + augemntations.append(transforms.RandomPerspective( + fill=0, p=1.0, distortion_scale=0.5)) + augemntations.append(transforms.RandomResizedCrop( + 224, scale=(0.8, 0.8), ratio=(1.0, 1.0))) + self.augment_trans = transforms.Compose(augemntations) + self.feature_extractor = LPIPS._FeatureExtractor( + pretrained, pre_relu).to(device) + + def _l2_normalize_features(self, x, eps=1e-10): + nrm = torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) + return x / (nrm + eps) + + def forward(self, pred, target, mode="train"): + """Compare VGG features of two inputs.""" + + # Get VGG features + + sketch_augs, img_augs = [pred], [target] + if mode == "train": + for n in range(4): + augmented_pair = self.augment_trans(torch.cat([pred, target])) + sketch_augs.append(augmented_pair[0].unsqueeze(0)) + img_augs.append(augmented_pair[1].unsqueeze(0)) + + xs = torch.cat(sketch_augs, dim=0) + ys = torch.cat(img_augs, dim=0) + + pred = self.feature_extractor(xs) + target = self.feature_extractor(ys) + + # L2 normalize features + if self.normalize: + pred = [self._l2_normalize_features(f) for f in pred] + target = [self._l2_normalize_features(f) for f in target] + + # TODO(mgharbi) Apply Richard's linear weights? 
+ + if self.normalize: + diffs = [torch.sum((p - t) ** 2, 1) + for (p, t) in zip(pred, target)] + else: + # mean instead of sum to avoid super high range + diffs = [torch.mean((p - t) ** 2, 1) + for (p, t) in zip(pred, target)] + + # Spatial average + diffs = [diff.mean([1, 2]) for diff in diffs] + + return sum(diffs) + + class _FeatureExtractor(torch.nn.Module): + def __init__(self, pretrained, pre_relu): + super(LPIPS._FeatureExtractor, self).__init__() + vgg_pretrained = models.vgg16(pretrained=pretrained).features + + self.breakpoints = [0, 4, 9, 16, 23, 30] + if pre_relu: + for i, _ in enumerate(self.breakpoints[1:]): + self.breakpoints[i + 1] -= 1 + + # Split at the maxpools + for i, b in enumerate(self.breakpoints[:-1]): + ops = torch.nn.Sequential() + for idx in range(b, self.breakpoints[i + 1]): + op = vgg_pretrained[idx] + ops.add_module(str(idx), op) + # print(ops) + self.add_module("group{}".format(i), ops) + + # No gradients + for p in self.parameters(): + p.requires_grad = False + + # Torchvision's normalization: <https://github.com/pytorch/examples/blob/42e5b996718797e45c46a25c55b031e6768f8440/imagenet/main.py#L89-L101> + self.register_buffer("shift", torch.Tensor( + [0.485, 0.456, 0.406]).view(1, 3, 1, 1)) + self.register_buffer("scale", torch.Tensor( + [0.229, 0.224, 0.225]).view(1, 3, 1, 1)) + + def forward(self, x): + feats = [] + x = (x - self.shift) / self.scale + for idx in range(len(self.breakpoints) - 1): + m = getattr(self, "group{}".format(idx)) + x = m(x) + feats.append(x) + return feats + + +class WidthLoss(torch.nn.Module): + def __init__(self, args, device): + super(WidthLoss, self).__init__() + self.width_loss_type = args.width_loss_type + self.width_loss_weight = args.width_loss_weight + self.zero = torch.tensor(0).to(device) + + def forward(self, widths, strokes_in_canvas_count): + sum_w = torch.sum(widths) + if self.width_loss_type == "L1_hinge": # this option is deprecated + return torch.max(self.zero, sum_w - self.width_loss_weight) + return sum_w / strokes_in_canvas_count + + +class RatioLoss(torch.nn.Module): + def __init__(self, args, device): + super(RatioLoss, self).__init__() + self.target_ratio = args.ratio_loss + self.mse_loss = nn.MSELoss() + + def forward(self, losses_dict_original, clip_loss_names): + loss_clip = 0 + for clip_loss in clip_loss_names: + loss_clip = loss_clip + losses_dict_original[clip_loss] + loss_clip = loss_clip * self.target_ratio + width_loss = losses_dict_original["width_loss"] + return self.mse_loss(width_loss, loss_clip) + + +class L2_(torch.nn.Module): + def __init__(self): + """ + Args: + pre_relu(bool): if True, selects features **before** reLU activations + """ + super(L2_, self).__init__() + # VGG using perceptually-learned weights (LPIPS metric) + augemntations = [] + augemntations.append(transforms.RandomPerspective( + fill=0, p=1.0, distortion_scale=0.5)) + augemntations.append(transforms.RandomResizedCrop( + 224, scale=(0.8, 0.8), ratio=(1.0, 1.0))) + augemntations.append( + transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))) + self.augment_trans = transforms.Compose(augemntations) + # LOG.warning("LPIPS is untested") + + def forward(self, pred, target, mode="train"): + """Compare VGG features of two inputs.""" + + # Get VGG features + + sketch_augs, img_augs = [pred], [target] + if mode == "train": + for n in range(4): + augmented_pair = self.augment_trans(torch.cat([pred, target])) + sketch_augs.append(augmented_pair[0].unsqueeze(0)) + 
img_augs.append(augmented_pair[1].unsqueeze(0)) + + pred = torch.cat(sketch_augs, dim=0) + target = torch.cat(img_augs, dim=0) + diffs = [torch.square(p - t).mean() for (p, t) in zip(pred, target)] + return sum(diffs) + + +class CLIPVisualEncoder(nn.Module): + def __init__(self, clip_model, device, mask_cls="none", apply_mask=False, mask_attention=False): + super().__init__() + self.clip_model = clip_model + self.featuremaps = None + self.device = device + self.n_channels = 3 + self.kernel_h = 32 + self.kernel_w = 32 + self.step = 32 + self.num_patches = 49 + self.mask_cls = mask_cls + self.apply_mask = apply_mask + self.mask_attention = mask_attention + + for i in range(12): # 12 resblocks in VIT visual transformer + self.clip_model.visual.transformer.resblocks[i].register_forward_hook( + self.make_hook(i)) + + def make_hook(self, name): + def hook(module, input, output): + if len(output.shape) == 3: + self.featuremaps[name] = output.permute( + 1, 0, 2) # LND -> NLD bs, smth, 768 + else: + self.featuremaps[name] = output + + return hook + + def forward(self, x, masks=None, mode="train"): + masks_flat = torch.ones((x.shape[0], 50, 768)).to(self.device) # without any effect + attn_map = None + if masks is not None and self.apply_mask: + x_copy = x.detach().clone() + + patches_x = x_copy.unfold(2, self.kernel_h, self.step).unfold(3, self.kernel_w, self.step).reshape(-1, + self.n_channels, + self.num_patches, + 32, 32) + # split the masks into patches (the same input patches to the transformer) + # shape is (batch_size, channel, num_patches, patch_size, patch_size) = (5, 3, 49, 32, 32) + patches_mask = masks.unfold(2, self.kernel_h, self.step).unfold(3, self.kernel_w, self.step).reshape(-1, + self.n_channels, + self.num_patches, + 32, 32) + # masks_ is a binary mask (batch_size, 1, 7, ,7) to say which patch should be masked out + masks_ = torch.ones((x.shape[0], 1, 7, 7)).to(self.device) + for i in range(masks.shape[0]): + for j in range(self.num_patches): + # we mask a patch if more than 20% of the patch is masked + zeros = (patches_mask[i, 0, j] == 0).sum() / (self.kernel_w * self.kernel_h) + if zeros > 0.2: + masks_[i, :, j // 7, j % 7] = 0 + + if self.mask_attention: + mask2 = masks_[:, 0].reshape(-1, 49).to(self.device) # .to(device) shape (5, 49) + mask2 = torch.cat([torch.ones(mask2.shape[0], 1).to(self.device), mask2], dim=-1) + mask2 = mask2.unsqueeze(1) + attn_map = mask2.repeat(1, 50, 1).to(self.device) # 5, 50, 50 + attn_map[:, 0, 0] = 1 + attn_map = 1 - attn_map + indixes = (attn_map == 0).nonzero() # shape [136, 2] [[aug_im],[index]] + attn_map = attn_map.repeat(12, 1, 1).bool() # [60, 50, 50] + + # masks_flat's shape is (5, 49), for each image in the batch we have 49 flags indicating if to mask the i'th patch or not + masks_flat = masks_[:, 0].reshape(-1, self.num_patches) + + # now we add the cls token mask, it's all ones for now since we want to leave it + # now the shape is (5, 50) where the first number in each of the 5 rows is 1 (meaning - son't mask the cls token) + masks_flat = torch.cat([torch.ones(masks_flat.shape[0], 1).to(self.device), masks_flat], + dim=1) # include cls by default + # now we duplicate this from (5, 50) to (5, 50, 768) to match the tokens dimentions + masks_flat = masks_flat.unsqueeze(2).repeat(1, 1, 768) # shape is (5, 50, 768) + + elif self.mask_cls != "none": + if self.mask_cls == "only_cls": + masks_flat = torch.zeros((5, 50, 768)).to(self.device) + masks_flat[:, 0, :] = 1 + elif self.mask_cls == "cls_out": + masks_flat[:, 0, :] = 0 + + 
self.featuremaps = collections.OrderedDict() + fc_features = self.clip_model.encode_image(x).float() + featuremaps = [self.featuremaps[k] * masks_flat for k in range(12)] + + return fc_features, featuremaps + + +def l2_layers(xs_conv_features, ys_conv_features, clip_model_name): + return [torch.square(x_conv - y_conv).mean() for x_conv, y_conv in + zip(xs_conv_features, ys_conv_features)] + + +def l1_layers(xs_conv_features, ys_conv_features, clip_model_name): + return [torch.abs(x_conv - y_conv).mean() for x_conv, y_conv in + zip(xs_conv_features, ys_conv_features)] + + +def cos_layers(xs_conv_features, ys_conv_features, clip_model_name): + if "RN" in clip_model_name: + return [torch.square(x_conv, y_conv, dim=1).mean() for x_conv, y_conv in + zip(xs_conv_features, ys_conv_features)] + return [(1 - torch.cosine_similarity(x_conv, y_conv, dim=1)).mean() for x_conv, y_conv in + zip(xs_conv_features, ys_conv_features)] + + +class CLIPConvLoss(torch.nn.Module): + def __init__(self, args, mask, device): + # mask is a binary tensor with shape (1,3,224,224) + super(CLIPConvLoss, self).__init__() + self.device = device + + self.mask = mask + self.loss_mask = args.loss_mask + assert self.loss_mask in ["none", "back", "for"] + self.apply_mask = (self.loss_mask != "none") + if self.loss_mask == "for": + # default for the mask is to mask out the background + # if mask loss is for it means we want to maskout the foreground + self.mask = 1 - mask + + self.clip_model_name = args.clip_model_name + assert self.clip_model_name in [ + "RN50", + "RN101", + "RN50x4", + "RN50x16", + "ViT-B/32", + "ViT-B/16", + ] + + self.clip_conv_loss_type = args.clip_conv_loss_type + self.clip_fc_loss_type = "Cos" # args.clip_fc_loss_type + assert self.clip_conv_loss_type in [ + "L2", "Cos", "L1", + ] + assert self.clip_fc_loss_type in [ + "L2", "Cos", "L1", + ] + + self.distance_metrics = \ + { + "L2": l2_layers, + "L1": l1_layers, + "Cos": cos_layers + } + + self.model, clip_preprocess = clip.load( + self.clip_model_name, self.device, jit=False) + + if self.clip_model_name.startswith("ViT"): + self.loss_log_name = "vit" + self.visual_encoder = CLIPVisualEncoder(self.model, self.device) + self.l11_norm = False + + else: + self.loss_log_name = "rn" + self.visual_model = self.model.visual + layers = list(self.model.visual.children()) + init_layers = torch.nn.Sequential(*layers)[:8] + self.layer1 = layers[8] + self.layer2 = layers[9] + self.layer3 = layers[10] + self.layer4 = layers[11] + self.att_pool2d = layers[12] + + self.args = args + + self.img_size = clip_preprocess.transforms[1].size + self.model.eval() + self.target_transform = transforms.Compose([ + transforms.ToTensor(), + ]) # clip normalisation + self.normalize_transform = transforms.Compose([ + clip_preprocess.transforms[0], # Resize + clip_preprocess.transforms[1], # CenterCrop + clip_preprocess.transforms[-1], # Normalize + ]) + + self.model.eval() + + self.num_augs = self.args.num_aug_clip + + augemntations = [] + if "affine" in args.augemntations: + augemntations.append(transforms.RandomPerspective( + fill=0, p=1.0, distortion_scale=0.5)) + augemntations.append(transforms.RandomResizedCrop( + 224, scale=(0.8, 0.8), ratio=(1.0, 1.0))) + augemntations.append( + transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))) + self.augment_trans = transforms.Compose(augemntations) + + self.clip_fc_layer_dims = None # self.args.clip_fc_layer_dims + self.clip_conv_layer_dims = None # self.args.clip_conv_layer_dims + 
self.clip_fc_loss_weight = args.clip_fc_loss_weight + self.counter = 0 + + def forward(self, sketch, target, mode="train"): + """ + Parameters + ---------- + sketch: Torch Tensor [1, C, H, W] + target: Torch Tensor [1, C, H, W] + """ + conv_loss_dict = {} + if self.apply_mask: + sketch *= self.mask + + x = sketch.to(self.device) + y = target.to(self.device) + + sketch_augs, img_augs = [self.normalize_transform(x)], [ + self.normalize_transform(y)] + if mode == "train": + for n in range(self.num_augs): + augmented_pair = self.augment_trans(torch.cat([x, y])) + sketch_augs.append(augmented_pair[0].unsqueeze(0)) + img_augs.append(augmented_pair[1].unsqueeze(0)) + + xs = torch.cat(sketch_augs, dim=0).to(self.device) + ys = torch.cat(img_augs, dim=0).to(self.device) + + if self.clip_model_name.startswith("RN"): + xs_fc_features, xs_conv_features = self.forward_inspection_clip_resnet( + xs.contiguous()) + ys_fc_features, ys_conv_features = self.forward_inspection_clip_resnet( + ys.detach()) + + else: + xs_fc_features, xs_conv_features = self.visual_encoder(xs, mode=mode) + ys_fc_features, ys_conv_features = self.visual_encoder(ys, mode=mode) + + conv_loss = self.distance_metrics[self.clip_conv_loss_type]( + xs_conv_features, ys_conv_features, self.clip_model_name) + + for layer, w in enumerate(self.args.clip_conv_layer_weights): + if w: + conv_loss_dict[f"clip_{self.loss_log_name}_l{layer}"] = conv_loss[layer] + if layer == 11 and self.l11_norm: + conv_loss_dict[f"clip_{self.loss_log_name}_l{layer}_normalization"] = conv_loss[layer] + + if self.clip_fc_loss_weight: + # fc distance is always cos + # fc_loss = torch.nn.functional.mse_loss(xs_fc_features, ys_fc_features).mean() + fc_loss = (1 - torch.cosine_similarity(xs_fc_features, + ys_fc_features, dim=1)).mean() + conv_loss_dict[f"fc_{self.loss_log_name}"] = fc_loss * self.clip_fc_loss_weight + + self.counter += 1 + return conv_loss_dict + + def forward_inspection_clip_resnet(self, x): + def stem(m, x): + for conv, bn in [(m.conv1, m.bn1), (m.conv2, m.bn2), (m.conv3, m.bn3)]: + x = m.relu(bn(conv(x))) + x = m.avgpool(x) + return x + + x = x.type(self.visual_model.conv1.weight.dtype) + x = stem(self.visual_model, x) + x1 = self.layer1(x) + x2 = self.layer2(x1) + x3 = self.layer3(x2) + x4 = self.layer4(x3) + y = self.att_pool2d(x4) + return y, [x, x1, x2, x3, x4] + + +class CLIPmaskLoss(torch.nn.Module): + def __init__(self, args, mask, device): + super(CLIPmaskLoss, self).__init__() + self.args = args + self.mask = mask + self.device = device + self.loss_mask = args.loss_mask + assert self.loss_mask in ["none", "back", "for", "back_latent", "for_latent"] + self.apply_mask = (self.loss_mask != "none") + self.dilated_mask = args.dilated_mask + if self.dilated_mask: + kernel_tensor = torch.ones((1, 1, 11, 11)).to(self.device) + mask_ = torch.clamp( + torch.nn.functional.conv2d(mask[:, 0, :, :].unsqueeze(1), kernel_tensor, padding=(5, 5)), 0, 1) + mask = torch.cat([mask_, mask_, mask_], axis=1) + + if "for" in self.loss_mask: + # default for the mask is to mask out the background + # if mask loss is for it means we want to maskout the foreground + self.mask = 1 - mask + + self.clip_model_name = args.clip_model_name + self.clip_for_model_name = "RN101" + self.valid_models = [ + "RN50", + "RN101", + "RN50x4", + "RN50x16", + "ViT-B/32", + "ViT-B/16", + ] + assert self.clip_model_name in self.valid_models and self.clip_for_model_name in self.valid_models + + self.clip_conv_layer_weights = args.clip_conv_layer_weights + self.clip_conv_loss_type = 
args.clip_conv_loss_type + self.clip_fc_loss_type = "Cos" + self.num_augs = args.num_aug_clip + + self.distance_metrics = \ + { + "L2": l2_layers, + "L1": l1_layers, + "Cos": cos_layers + } + + # background model (ViT) + self.model, clip_preprocess = clip.load( + self.clip_model_name, self.device, jit=False) + self.model.eval() + if self.clip_model_name.startswith("ViT"): + self.visual_encoder = CLIPVisualEncoder(self.model, self.device, args.mask_cls, self.apply_mask, + args.mask_attention) + + self.img_size = clip_preprocess.transforms[1].size + + self.target_transform = transforms.Compose([ + transforms.ToTensor(), + ]) # clip normalisation + self.normalize_transform = transforms.Compose([ + # clip_preprocess.transforms[0], # Resize + # clip_preprocess.transforms[1], # CenterCrop + clip_preprocess.transforms[-1], # Normalize + ]) + + augemntations = [] + augemntations.append(transforms.RandomPerspective( + fill=0, p=1.0, distortion_scale=0.5)) + augemntations.append(transforms.RandomResizedCrop( + 224, scale=(0.8, 0.8), ratio=(1.0, 1.0))) + # augemntations.append(transforms.RandomResizedCrop( + # 224, scale=(0.4, 0.9), ratio=(1.0, 1.0))) + + self.augment_trans = transforms.Compose(augemntations) + self.clip_fc_layer_dims = None # self.args.clip_fc_layer_dims + self.clip_conv_layer_dims = None # self.args.clip_conv_layer_dims + self.clip_fc_loss_weight = 0 + self.counter = 0 + + def forward(self, sketch, target, mode="train"): + """ + Parameters + ---------- + sketch: Torch Tensor [1, C, H, W] + target: Torch Tensor [1, C, H, W] + """ + conv_loss_dict = {} + + x = sketch.to(self.device) + y = target.to(self.device) + sketch_augs, img_augs, masks = [x], [y], [self.mask] + if mode == "train": + for n in range(self.num_augs): + augmented_pair = self.augment_trans(torch.cat([x, y, self.mask])) + sketch_augs.append(augmented_pair[0].unsqueeze(0)) + img_augs.append(augmented_pair[1].unsqueeze(0)) + masks.append(augmented_pair[2].unsqueeze(0)) + xs = torch.cat(sketch_augs, dim=0).to(self.device) + ys = torch.cat(img_augs, dim=0).to(self.device) + masks = torch.cat(masks, dim=0).to(self.device) + masks[masks < 0.5] = 0 + masks[masks >= 0.5] = 1 + # background pass + if self.apply_mask and "latent" not in self.loss_mask: + # if "latent" not in self.loss_mask: + xs_back = self.normalize_transform(xs * masks) + else: + xs_back = self.normalize_transform(xs) + ys_back = self.normalize_transform(ys) + if "latent" not in self.loss_mask: + masks = None + xs_fc_features, xs_conv_features = self.visual_encoder(xs_back, masks, mode=mode) + ys_fc_features, ys_conv_features = self.visual_encoder(ys_back, masks, mode=mode) + conv_loss = self.distance_metrics[self.clip_conv_loss_type]( + xs_conv_features, ys_conv_features, self.clip_model_name) + for layer, w in enumerate(self.clip_conv_layer_weights): + if w: + conv_loss_dict[f"clip_vit_l{layer}"] = conv_loss[layer] * w + + self.counter += 1 + return conv_loss_dict + + def forward_inspection_clip_resnet(self, x): + def stem(m, x): + for conv, bn in [(m.conv1, m.bn1), (m.conv2, m.bn2), (m.conv3, m.bn3)]: + x = m.relu(bn(conv(x))) + x = m.avgpool(x) + return x + + x = x.type(self.visual_model.conv1.weight.dtype) + x = stem(self.visual_model, x) + x1 = self.layer1(x) + x2 = self.layer2(x1) + x3 = self.layer3(x2) + x4 = self.layer4(x3) + y = self.att_pool2d(x4) + return y, [x, x1, x2, x3, x4] diff --git a/pytorch_svgrender/painter/clipascene/painter_params.py b/pytorch_svgrender/painter/clipascene/painter_params.py new file mode 100644 index 
0000000000000000000000000000000000000000..1839ebc81ed1113f3b522e8eb62a7c714689629c --- /dev/null +++ b/pytorch_svgrender/painter/clipascene/painter_params.py @@ -0,0 +1,771 @@ +import pathlib +import random + +import numpy as np +import omegaconf +import pydiffvg +import torch +import torch.nn as nn +from PIL import Image +from pytorch_svgrender.diffvg_warp import DiffVGState +from pytorch_svgrender.libs.modules.edge_map.DoG import XDoG +from pytorch_svgrender.painter.clipasso import modified_clip as clip +from pytorch_svgrender.painter.clipasso.grad_cam import gradCAM +from torchvision import transforms + + +class Painter(DiffVGState): + + def __init__( + self, + method_cfg: omegaconf.DictConfig, + diffvg_cfg: omegaconf.DictConfig, + num_strokes: int = 4, + canvas_size: int = 224, + device=None, + target_im=None, + mask=None + ): + super(Painter, self).__init__(device, print_timing=diffvg_cfg.print_timing, + canvas_width=canvas_size, canvas_height=canvas_size) + + self.args = method_cfg + self.num_paths = num_strokes + self.num_segments = method_cfg.num_segments + self.width = method_cfg.width + self.control_points_per_seg = method_cfg.control_points_per_seg + self.num_control_points = torch.zeros(self.num_segments, dtype=torch.int32) + (self.control_points_per_seg - 2) + + self.opacity_optim = method_cfg.force_sparse + self.num_stages = method_cfg.num_stages + self.noise_thresh = method_cfg.noise_thresh + self.softmax_temp = method_cfg.softmax_temp + + self.add_random_noise = "noise" in method_cfg.augemntations + self.optimize_points = method_cfg.optimize_points + self.optimize_points_global = method_cfg.optimize_points + self.points_init = [] # for mlp training + + self.color_vars_threshold = method_cfg.color_vars_threshold + + self.path_svg = method_cfg.path_svg + self.strokes_per_stage = self.num_paths + self.optimize_flag = [] + + # attention related for strokes initialisation + self.attention_init = method_cfg.attention_init + self.saliency_model = method_cfg.saliency_model + self.xdog_intersec = method_cfg.xdog_intersec + self.mask_object_attention = method_cfg.mask_object_attention + + self.text_target = method_cfg.text_target # for clip gradients + self.saliency_clip_model = method_cfg.saliency_clip_model + self.image2clip_input = self.clip_preprocess(target_im) + + self.mask = mask + self.attention_map = self.set_attention_map() if self.attention_init else None + + self.thresh = self.set_attention_threshold_map() if self.attention_init else None + self.strokes_counter = 0 # counts the number of calls to "get_path" + self.epoch = 0 + self.final_epoch = method_cfg.num_iter - 1 + + if "for" in method_cfg.loss_mask: + # default for the mask is to mask out the background + # if mask loss is for it means we want to maskout the foreground + self.mask = 1 - mask + + self.mlp_train = method_cfg.mlp_train + self.width_optim = method_cfg.width_optim + self.width_optim_global = method_cfg.width_optim + + if self.width_optim: + self.init_widths = torch.ones((self.num_paths)).to(device) * 1.5 + self.mlp_width = WidthMLP(num_strokes=self.num_paths, num_cp=self.control_points_per_seg, + width_optim=self.width_optim).to(device) + self.mlp_width_weights_path = method_cfg.mlp_width_weights_path + self.mlp_width_weight_init() + self.gumbel_temp = method_cfg.gumbel_temp + self.mlp = MLP(num_strokes=self.num_paths, num_cp=self.control_points_per_seg, width_optim=self.width_optim).to( + device) if self.mlp_train else None + self.mlp_points_weights_path = method_cfg.mlp_points_weights_path + 
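# Illustrative sketch (assumed values, not part of the patch): the stroke geometry
# implied by the constructor above. With control_points_per_seg = 4 (cubic Bezier
# segments), pydiffvg expects 2 inner control points per segment, and an open path
# with S segments carries 3 * S + 1 point coordinates in total.
import torch

num_segments = 1
control_points_per_seg = 4
num_control_points = torch.zeros(num_segments, dtype=torch.int32) + (control_points_per_seg - 2)
total_points = num_segments * (control_points_per_seg - 1) + 1
print(num_control_points.tolist(), total_points)  # [2] 4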
self.mlp_points_weight_init() + self.out_of_canvas_mask = torch.ones((self.num_paths)).to(self.device) + + def turn_off_points_optim(self): + self.optimize_points = False + + def switch_opt(self): + self.width_optim = not self.width_optim + self.optimize_points = not self.optimize_points + + def mlp_points_weight_init(self): + if self.mlp_points_weights_path != "none": + checkpoint = torch.load(self.mlp_points_weights_path) + self.mlp.load_state_dict(checkpoint['model_state_dict']) + print("mlp checkpoint loaded from ", self.mlp_points_weights_path) + + def mlp_width_weight_init(self): + if self.mlp_width_weights_path == "none": + self.mlp_width.apply(init_weights) + else: + checkpoint = torch.load(self.mlp_width_weights_path) + self.mlp_width.load_state_dict(checkpoint['model_state_dict']) + print("mlp checkpoint loaded from ", self.mlp_width_weights_path) + + def init_image(self, stage=0): + if stage > 0: + # Noting: if multi stages training than add new strokes on existing ones + # don't optimize on previous strokes + self.optimize_flag = [False for i in range(len(self.shapes))] + for i in range(self.strokes_per_stage): + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + path = self.get_path() + self.shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + self.shape_groups.append(path_group) + self.optimize_flag.append(True) + else: + num_paths_exists = 0 + if self.path_svg is not None and pathlib.Path(self.path_svg).exists(): + print(f"-> init svg from `{self.path_svg}` ...") + + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups = self.load_svg(self.path_svg) + # if you want to add more strokes to existing ones and optimize on all of them + num_paths_exists = len(self.shapes) + for path in self.shapes: + self.points_init.append(path.points) + for i in range(num_paths_exists, self.num_paths): + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + path = self.get_path() + self.shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + self.shape_groups.append(path_group) + self.optimize_flag = [True for i in range(len(self.shapes))] + + def get_image(self, mode="train"): + if self.mlp_train: + img = self.mlp_pass(mode) + else: + img = self.render_warp(mode) + opacity = img[:, :, 3:4] + img = opacity * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - opacity) + img = img[:, :, :3] + # Convert img from HWC to NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def mlp_pass(self, mode, eps=1e-4): + """ + update self.shapes etc through mlp pass instead of directly (should be updated with the optimizer as well). 
+ """ + if self.optimize_points_global: + points_vars = self.points_init + # reshape and normalise to [-1,1] range + points_vars = torch.stack(points_vars).unsqueeze(0).to(self.device) + points_vars = points_vars / self.canvas_width + points_vars = 2 * points_vars - 1 + if self.optimize_points: + points = self.mlp(points_vars) + else: + with torch.no_grad(): + points = self.mlp(points_vars) + + else: + points = torch.stack(self.points_init).unsqueeze(0).to(self.device) + + if self.width_optim and mode != "init": # first iter use just the location mlp + widths_ = self.mlp_width(self.init_widths).clamp(min=1e-8) + mask_flipped = (1 - widths_).clamp(min=1e-8) + v = torch.stack((torch.log(widths_), torch.log(mask_flipped)), dim=-1) + hard_mask = torch.nn.functional.gumbel_softmax(v, self.gumbel_temp, False) + self.stroke_probs = hard_mask[:, 0] * self.out_of_canvas_mask + self.widths = self.stroke_probs * self.init_widths + + # normalize back to canvas size [0, 224] and reshape + all_points = 0.5 * (points + 1.0) * self.canvas_width + all_points = all_points + eps * torch.randn_like(all_points) + all_points = all_points.reshape((-1, self.num_paths, self.control_points_per_seg, 2)) + + if self.width_optim_global and not self.width_optim: + self.widths = self.widths.detach() + # all_points = all_points.detach() + + # define new primitives to render + shapes = [] + shape_groups = [] + for p in range(self.num_paths): + width = torch.tensor(self.width) + if self.width_optim_global and mode != "init": + width = self.widths[p] + path = pydiffvg.Path( + num_control_points=self.num_control_points, points=all_points[:, p].reshape((-1, 2)), + stroke_width=width, is_closed=False) + if mode == "init": + # do once at the begining, define a mask for strokes that are outside the canvas + is_in_canvas_ = self.is_in_canvas(self.canvas_width, self.canvas_height, path) + if not is_in_canvas_: + self.out_of_canvas_mask[p] = 0 + shapes.append(path) + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=torch.tensor([0, 0, 0, 1])) + shape_groups.append(path_group) + + _render = pydiffvg.RenderFunction.apply + scene_method_cfg = pydiffvg.RenderFunction.serialize_scene( \ + self.canvas_width, self.canvas_height, shapes, shape_groups) + img = _render(self.canvas_width, # width + self.canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_method_cfg) + self.shapes = shapes.copy() + self.shape_groups = shape_groups.copy() + return img + + def get_path(self): + points = [] + p0 = self.inds_normalised[self.strokes_counter] if self.attention_init else (random.random(), random.random()) + points.append(p0) + + for j in range(self.num_segments): + radius = 0.05 + for k in range(self.control_points_per_seg - 1): + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + points.append(p1) + p0 = p1 + points = torch.tensor(points).to(self.device) + points[:, 0] *= self.canvas_width + points[:, 1] *= self.canvas_height + + self.points_init.append(points) + path = pydiffvg.Path(num_control_points=self.num_control_points, + points=points, + stroke_width=torch.tensor(self.width), + is_closed=False) + self.strokes_counter += 1 + return path + + def render_warp(self, mode): + if not self.mlp_train: + if self.opacity_optim: + for group in self.shape_groups: + group.stroke_color.data[:3].clamp_(0., 0.) # to force black stroke + group.stroke_color.data[-1].clamp_(0., 1.) 
# opacity + # group.stroke_color.data[-1] = (group.stroke_color.data[-1] >= self.color_vars_threshold).float() + # uncomment if you want to add random noise + if self.add_random_noise: + if random.random() > self.noise_thresh: + eps = 0.01 * min(self.canvas_width, self.canvas_height) + for path in self.shapes: + path.points.data.add_(eps * torch.randn_like(path.points)) + + if self.width_optim and mode != "init": + widths_ = self.mlp_width(self.init_widths).clamp(min=1e-8) + mask_flipped = 1 - widths_ + v = torch.stack((torch.log(widths_), torch.log(mask_flipped)), dim=-1) + hard_mask = torch.nn.functional.gumbel_softmax(v, self.gumbel_temp, False) + self.stroke_probs = hard_mask[:, 0] * self.out_of_canvas_mask + self.widths = self.stroke_probs * self.init_widths + + if self.optimize_points: + _render = pydiffvg.RenderFunction.apply + scene_method_cfg = pydiffvg.RenderFunction.serialize_scene( \ + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups) + img = _render(self.canvas_width, # width + self.canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_method_cfg) + else: + points = torch.stack(self.points_init).unsqueeze(0).to(self.device) + shapes = [] + shape_groups = [] + for p in range(self.num_paths): + width = torch.tensor(self.width) + if self.width_optim: + width = self.widths[p] + path = pydiffvg.Path( + num_control_points=self.num_control_points, points=points[:, p].reshape((-1, 2)), + stroke_width=width, is_closed=False) + shapes.append(path) + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=torch.tensor([0, 0, 0, 1])) + shape_groups.append(path_group) + + _render = pydiffvg.RenderFunction.apply + scene_method_cfg = pydiffvg.RenderFunction.serialize_scene( \ + self.canvas_width, self.canvas_height, shapes, shape_groups) + img = _render(self.canvas_width, # width + self.canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_method_cfg) + self.shapes = shapes.copy() + self.shape_groups = shape_groups.copy() + + return img + + def parameters(self): + if self.optimize_points: + if self.mlp_train: + self.points_vars = self.mlp.parameters() + else: + self.points_vars = [] + # storkes' location optimization + for i, path in enumerate(self.shapes): + if self.optimize_flag[i]: + path.points.requires_grad = True + self.points_vars.append(path.points) + self.optimize_flag[i] = False + + if self.width_optim: + return self.points_vars, self.mlp_width.parameters() + return self.points_vars + + def get_mlp(self): + return self.mlp + + def get_width_mlp(self): + if self.width_optim_global: + return self.mlp_width + else: + return None + + def set_color_parameters(self): + # for storkes' color optimization (opacity) + self.color_vars = [] + for i, group in enumerate(self.shape_groups): + if self.optimize_flag[i]: + group.stroke_color.requires_grad = True + self.color_vars.append(group.stroke_color) + return self.color_vars + + def get_color_parameters(self): + return self.color_vars + + def get_widths(self): + if self.width_optim_global: + return self.stroke_probs + return None + + def get_strokes_in_canvas_count(self): + return self.out_of_canvas_mask.sum() + + def get_strokes_count(self): + if self.width_optim_global: + with torch.no_grad(): + return torch.sum(self.stroke_probs) + return self.num_paths + + def is_in_canvas(self, canvas_width, canvas_height, path): + shapes, shape_groups = [], [] + stroke_color = torch.tensor([0.0, 
0.0, 0.0, 1.0]) + shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + shape_groups.append(path_group) + _render = pydiffvg.RenderFunction.apply + scene_method_cfg = pydiffvg.RenderFunction.serialize_scene( + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_method_cfg) + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, + device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3].detach().cpu().numpy() + return (1 - img).sum() + + def save_svg(self, output_dir, name): + if not self.width_optim: + pydiffvg.save_svg('{}/{}.svg'.format(output_dir, name), self.canvas_width, self.canvas_height, self.shapes, + self.shape_groups) + else: + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + new_shapes, new_shape_groups = [], [] + for path in self.shapes: + is_in_canvas_ = True + w = path.stroke_width / 1.5 + if w > 0.7 and is_in_canvas_: + new_shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(new_shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + new_shape_groups.append(path_group) + pydiffvg.save_svg('{}/{}.svg'.format(output_dir, name), self.canvas_width, self.canvas_height, new_shapes, + new_shape_groups) + + def clip_preprocess(self, target_im): + model, preprocess = clip.load(self.saliency_clip_model, device=self.device, jit=False) + model.eval().to(self.device) + data_transforms = transforms.Compose([ + preprocess.transforms[-1], + ]) + return data_transforms(target_im).to(self.device) + + def dino_attn(self): + patch_size = 8 # dino hyperparameter + threshold = 0.6 + + # for dino model + mean_imagenet = torch.Tensor([0.485, 0.456, 0.406])[None, :, None, None].to(self.device) + std_imagenet = torch.Tensor([0.229, 0.224, 0.225])[None, :, None, None].to(self.device) + totens = transforms.Compose([ + transforms.Resize((self.canvas_height, self.canvas_width)), + transforms.ToTensor() + ]) + + dino_model = torch.hub.load('facebookresearch/dino:main', 'dino_vits8').eval().to(self.device) + + self.main_im = Image.open(self.target_path).convert("RGB") + main_im_tensor = totens(self.main_im).to(self.device) + img = (main_im_tensor.unsqueeze(0) - mean_imagenet) / std_imagenet + w_featmap = img.shape[-2] // patch_size + h_featmap = img.shape[-1] // patch_size + + with torch.no_grad(): + attn = dino_model.get_last_selfattention(img).detach().cpu()[0] + + nh = attn.shape[0] + attn = attn[:, 0, 1:].reshape(nh, -1) + val, idx = torch.sort(attn) + val /= torch.sum(val, dim=1, keepdim=True) + cumval = torch.cumsum(val, dim=1) + th_attn = cumval > (1 - threshold) + idx2 = torch.method_cfgort(idx) + for head in range(nh): + th_attn[head] = th_attn[head][idx2[head]] + th_attn = th_attn.reshape(nh, w_featmap, h_featmap).float() + th_attn = nn.functional.interpolate(th_attn.unsqueeze(0), scale_factor=patch_size, mode="nearest")[0].cpu() + + attn = attn.reshape(nh, w_featmap, h_featmap).float() + attn = nn.functional.interpolate(attn.unsqueeze(0), scale_factor=patch_size, mode="nearest")[0].cpu() + + return attn + + def clip_attn(self): + model, preprocess = clip.load(self.saliency_clip_model, device=self.device, jit=False) + model.eval().to(self.device) + + if "RN" in self.saliency_clip_model: + text_input = clip.tokenize([self.text_target]).to(self.device) + saliency_layer = "layer4" + attn_map = 
gradCAM( + model.visual, + self.image2clip_input, + model.encode_text(text_input).float(), + getattr(model.visual, saliency_layer) + ) + attn_map = attn_map.squeeze().detach().cpu().numpy() + attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min()) + else: # ViT + attn_map = interpret(self.image2clip_input, model, device=self.device) + + del model + return attn_map + + def set_attention_map(self): + assert self.saliency_model in ["dino", "clip"] + if self.saliency_model == "dino": + return self.dino_attn() + elif self.saliency_model == "clip": + return self.clip_attn() + + def softmax(self, x, tau=0.2): + e_x = np.exp(x / tau) + return e_x / e_x.sum() + + def set_inds_clip(self): + attn_map = (self.attention_map - self.attention_map.min()) / ( + self.attention_map.max() - self.attention_map.min()) + if self.xdog_intersec: + xdog = XDoG(k=10) + im_xdog = xdog(self.image2clip_input[0].permute(1, 2, 0).cpu().numpy()) + intersec_map = (1 - im_xdog) * attn_map + attn_map = intersec_map + if self.mask_object_attention: + attn_map = attn_map * self.mask[0, 0].cpu().numpy() + + attn_map_soft = np.copy(attn_map) + attn_map_soft[attn_map > 0] = self.softmax(attn_map[attn_map > 0], tau=self.softmax_temp) + + k = self.num_stages * self.num_paths + self.inds = np.random.choice(range(attn_map.flatten().shape[0]), size=k, replace=False, + p=attn_map_soft.flatten()) + self.inds = np.array(np.unravel_index(self.inds, attn_map.shape)).T + + self.inds_normalised = np.zeros(self.inds.shape) + self.inds_normalised[:, 0] = self.inds[:, 1] / self.canvas_width + self.inds_normalised[:, 1] = self.inds[:, 0] / self.canvas_height + self.inds_normalised = self.inds_normalised.tolist() + return attn_map_soft + + def set_inds_dino(self): + k = max(3, (self.num_stages * self.num_paths) // 6 + 1) # sample top 3 three points from each attention head + num_heads = self.attention_map.shape[0] + self.inds = np.zeros((k * num_heads, 2)) + # "thresh" is used for visualisaiton purposes only + thresh = torch.zeros(num_heads + 1, self.attention_map.shape[1], self.attention_map.shape[2]) + softmax = nn.Softmax(dim=1) + for i in range(num_heads): + # replace "self.attention_map[i]" with "self.attention_map" to get the highest values among + # all heads. + topk, indices = np.unique(self.attention_map[i].numpy(), return_index=True) + topk = topk[::-1][:k] + cur_attn_map = self.attention_map[i].numpy() + # prob function for uniform sampling + prob = cur_attn_map.flatten() + prob[prob > topk[-1]] = 1 + prob[prob <= topk[-1]] = 0 + prob = prob / prob.sum() + thresh[i] = torch.Tensor(prob.reshape(cur_attn_map.shape)) + + # choose k pixels from each head + inds = np.random.choice(range(cur_attn_map.flatten().shape[0]), size=k, replace=False, p=prob) + inds = np.unravel_index(inds, cur_attn_map.shape) + self.inds[i * k: i * k + k, 0] = inds[0] + self.inds[i * k: i * k + k, 1] = inds[1] + + # for visualisaiton + sum_attn = self.attention_map.sum(0).numpy() + mask = np.zeros(sum_attn.shape) + mask[thresh[:-1].sum(0) > 0] = 1 + sum_attn = sum_attn * mask + sum_attn = sum_attn / sum_attn.sum() + thresh[-1] = torch.Tensor(sum_attn) + + # sample num_paths from the chosen pixels. 
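# Illustrative sketch (toy data, not part of the patch): the saliency-driven point
# sampling used by set_inds_clip above. The attention map is sharpened with a
# temperature softmax, k flat indices are drawn without replacement, and the
# resulting pixel coordinates are normalised to [0, 1] as stroke start positions.
import numpy as np

np.random.seed(0)
canvas = 224
attn = np.random.rand(canvas, canvas)                  # stand-in saliency map
tau = 0.3
probs = np.exp(attn / tau) / np.exp(attn / tau).sum()  # softmax over all pixels
k = 16
flat_inds = np.random.choice(attn.size, size=k, replace=False, p=probs.flatten())
yx = np.array(np.unravel_index(flat_inds, attn.shape)).T      # (k, 2) as (row, col)
inds_normalised = np.stack([yx[:, 1] / canvas, yx[:, 0] / canvas], axis=1)  # (x, y) in [0, 1]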
+ prob_sum = sum_attn[self.inds[:, 0].astype(np.int), self.inds[:, 1].astype(np.int)] + prob_sum = prob_sum / prob_sum.sum() + new_inds = [] + for i in range(self.num_stages): + new_inds.extend(np.random.choice(range(self.inds.shape[0]), size=self.num_paths, replace=False, p=prob_sum)) + self.inds = self.inds[new_inds] + + self.inds_normalised = np.zeros(self.inds.shape) + self.inds_normalised[:, 0] = self.inds[:, 1] / self.canvas_width + self.inds_normalised[:, 1] = self.inds[:, 0] / self.canvas_height + self.inds_normalised = self.inds_normalised.tolist() + return thresh + + def set_attention_threshold_map(self): + assert self.saliency_model in ["dino", "clip"] + if self.saliency_model == "dino": + return self.set_inds_dino() + elif self.saliency_model == "clip": + return self.set_inds_clip() + + def get_attn(self): + return self.attention_map + + def get_thresh(self): + return self.thresh + + def get_inds(self): + return self.inds + + def get_mask(self): + return self.mask + + def set_random_noise(self, epoch): + if epoch % self.args.save_step == 0: + self.add_random_noise = False + else: + self.add_random_noise = "noise" in self.args.augemntations + + +class PainterOptimizer: + def __init__(self, args, renderer): + self.renderer = renderer + self.points_lr = args.lr + self.color_lr = args.color_lr + self.args = args + self.optim_color = args.force_sparse + self.width_optim = args.width_optim + self.width_optim_global = args.width_optim + self.width_lr = args.width_lr + self.optimize_points = args.optimize_points + self.optimize_points_global = args.optimize_points + self.points_optim = None + self.width_optimizer = None + self.mlp_width_weights_path = args.mlp_width_weights_path + self.mlp_points_weights_path = args.mlp_points_weights_path + self.load_points_opt_weights = args.load_points_opt_weights + # self.only_width = args.only_width + + def turn_off_points_optim(self): + self.optimize_points = False + + def switch_opt(self): + self.width_optim = not self.width_optim + self.optimize_points = not self.optimize_points + + def init_optimizers(self): + if self.width_optim: + points_params, width_params = self.renderer.parameters() + self.width_optimizer = torch.optim.Adam(width_params, lr=self.width_lr) + if self.mlp_width_weights_path != "none": + checkpoint = torch.load(self.mlp_width_weights_path) + self.width_optimizer.load_state_dict(checkpoint['optimizer_state_dict']) + print("optimizer checkpoint loaded from ", self.mlp_width_weights_path) + else: + points_params = self.renderer.parameters() + + if self.optimize_points: + self.points_optim = torch.optim.Adam(points_params, lr=self.points_lr) + if self.mlp_points_weights_path != "none" and self.load_points_opt_weights: + checkpoint = torch.load(self.mlp_points_weights_path) + self.points_optim.load_state_dict(checkpoint['optimizer_state_dict']) + print("optimizer checkpoint loaded from ", self.mlp_points_weights_path) + + if self.optim_color: + self.color_optim = torch.optim.Adam(self.renderer.set_color_parameters(), lr=self.color_lr) + + def zero_grad_(self): + if self.optimize_points: + self.points_optim.zero_grad() + if self.width_optim: + self.width_optimizer.zero_grad() + if self.optim_color: + self.color_optim.zero_grad() + + def step_(self): + if self.optimize_points: + self.points_optim.step() + if self.width_optim: + self.width_optimizer.step() + if self.optim_color: + self.color_optim.step() + + def get_lr(self, optim="points"): + if optim == "points" and self.optimize_points_global: + return 
self.points_optim.param_groups[0]['lr'] + if optim == "width" and self.width_optim_global: + return self.width_optimizer.param_groups[0]['lr'] + else: + return None + + def get_points_optim(self): + return self.points_optim + + def get_width_optim(self): + return self.width_optimizer + + +class LinearDecayLR: + + def __init__(self, decay_every, decay_ratio): + self.decay_every = decay_every + self.decay_ratio = decay_ratio + + def __call__(self, n): + decay_time = n // self.decay_every + decay_step = n % self.decay_every + lr_s = self.decay_ratio ** decay_time + lr_e = self.decay_ratio ** (decay_time + 1) + r = decay_step / self.decay_every + lr = lr_s * (1 - r) + lr_e * r + return lr + + +def interpret(image, clip_model, device): + # virtual forward to get attention map + images = image.repeat(1, 1, 1, 1) + _ = clip_model.encode_image(images) # ensure `attn_probs` in attention is not empty + clip_model.zero_grad() + + image_attn_blocks = list(dict(clip_model.visual.transformer.resblocks.named_children()).values()) + # create R to store attention map + num_tokens = image_attn_blocks[0].attn_probs.shape[-1] + R = torch.eye(num_tokens, num_tokens, dtype=image_attn_blocks[0].attn_probs.dtype).to(device) + R = R.unsqueeze(0).expand(1, num_tokens, num_tokens) + + cams = [] + for i, blk in enumerate(image_attn_blocks): # 12 attention blocks + cam = blk.attn_probs.detach() # attn_probs shape: [12, 50, 50] + # each patch is 7x7 so we have 49 pixels + 1 for positional encoding + cam = cam.reshape(1, -1, cam.shape[-1], cam.shape[-1]) + cam = cam.clamp(min=0) + cam = cam.clamp(min=0).mean(dim=1) # mean of the 12 something + cams.append(cam) + R = R + torch.bmm(cam, R) + + cams_avg = torch.cat(cams) # [12, 50, 50] + cams_avg = cams_avg[:, 0, 1:] # [12, 49] + image_relevance = cams_avg.mean(dim=0).unsqueeze(0) # [1, 49] + image_relevance = image_relevance.reshape(1, 1, 7, 7) # [1, 1, 7, 7] + # interpolate: [1, 1, 7, 7] -> [1, 3, 224, 224] + image_relevance = torch.nn.functional.interpolate(image_relevance, size=224, mode='bicubic') + image_relevance = image_relevance.reshape(224, 224).data.cpu().numpy().astype(np.float32) + # normalize the tensor to [0, 1] + image_relevance = (image_relevance - image_relevance.min()) / (image_relevance.max() - image_relevance.min()) + return image_relevance + + +class MLP(nn.Module): + def __init__(self, num_strokes, num_cp, width_optim=False): + super().__init__() + outdim = 1000 + self.width_optim = width_optim + self.layers_points = nn.Sequential( + nn.Flatten(), + nn.Linear(num_strokes * num_cp * 2, outdim), + nn.SELU(inplace=True), + nn.Linear(outdim, outdim), + nn.SELU(inplace=True), + nn.Linear(outdim, num_strokes * num_cp * 2), + ) + + def forward(self, x, widths=None): + '''Forward pass''' + deltas = self.layers_points(x) + # if self.width_optim: + # return x.flatten() + 0.1 * deltas, self.layers_width(widths) + return x.flatten() + 0.1 * deltas + + +class WidthMLP(nn.Module): + def __init__(self, num_strokes, num_cp, width_optim=False): + super().__init__() + outdim = 1000 + self.width_optim = width_optim + + self.layers_width = nn.Sequential( + nn.Linear(num_strokes, outdim), + nn.SELU(inplace=True), + nn.Linear(outdim, outdim), + nn.SELU(inplace=True), + nn.Linear(outdim, num_strokes), + nn.Sigmoid() + ) + + def forward(self, widths=None): + '''Forward pass''' + return self.layers_width(widths) + + +def init_weights(m): + if isinstance(m, nn.Linear): + torch.nn.init.xavier_uniform(m.weight) + m.bias.data.fill_(0.01) diff --git 
a/pytorch_svgrender/painter/clipascene/scripts_utils.py b/pytorch_svgrender/painter/clipascene/scripts_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..97f42926383dee0624372d37f313f8e6e106b39f --- /dev/null +++ b/pytorch_svgrender/painter/clipascene/scripts_utils.py @@ -0,0 +1,138 @@ +import os + +import numpy as np +import pydiffvg +import torch +from scipy.optimize import curve_fit + + +def get_svg_file(path): + files = os.listdir(path) + files = [f for f in files if "best.svg" in f] + return files[0] + + +def get_seed(filename): + filename = filename[:-9] + keyword = 'seed' + before_keyword, keyword, after_keyword = filename.partition(keyword) + return after_keyword + + +def get_clip_loss(path, layer): + path_config = path / "config.npy" + config = np.load(path_config, allow_pickle=True)[()] + loss_clip = np.array(config[f"loss_eval"]) + best_iter = np.argsort(loss_clip)[0] + loss_clip_layer = np.array(config[f"clip_vit_l{layer}_original_eval"]) + return loss_clip, best_iter, loss_clip_layer + + +def ratios_to_str(ratios): + ratios_str = "" + for r_ in ratios: + r_str = f"{r_:.3f}" + ratios_str += f"{float(r_str)}," + ratios_str = ratios_str[:-1] + return ratios_str + + +def func(x, a, c, d): + return a * np.exp(c * x) + + +def func_inv(y, a, c, d): + return np.log(y / a) * (1 / c) + + +def get_func(ratios_rel, start_x, start_ys): + target_ys = ratios_rel[start_ys:] + x = np.linspace(start_x, start_x + len(target_ys) - 1, len(target_ys)) + # calculate exponent + popt, pcov = curve_fit(func, x, target_ys, maxfev=3000) + return popt + + +def get_clip_loss2(path, layer, object_or_background): + path_config = path / "config.npy" + config = np.load(path_config, allow_pickle=True)[()] + loss_clip = np.array(config[f"loss_eval"]) + best_iter = np.argsort(loss_clip)[0] + loss_clip_layer = np.array(config[f"clip_vit_l{layer}_original_eval"]) + if object_or_background == "object": + loss_clip_layer4 = np.array(config[f"clip_vit_l4_original_eval"]) + loss_clip_layer = 1 * loss_clip_layer4 + loss_clip_layer + return best_iter, loss_clip_layer + + +def get_ratios_dict(path_to_initial_sketches, folder_name_l, layer, im_name, object_or_background, step_size_l, + num_ratios=8): + # get the sketch of the given layer, and get L_clip_i + svg_filename = get_svg_file(path_to_initial_sketches / folder_name_l) + seed = get_seed(svg_filename) + path_li = path_to_initial_sketches / folder_name_l / f"{folder_name_l}_seed{seed}" + best_iter, loss_clip_layer = get_clip_loss2(path_li, layer, object_or_background) + best_lclip_layer = loss_clip_layer[best_iter] + r_1_k = 1 / best_lclip_layer + + # get the next ratios by jumping by 2 + r_j_k = r_1_k + ratios_k = [r_1_k] + for j in range(4): + r_j_k = r_j_k / 2 + ratios_k.append(r_j_k) + start_ys, start_x, end_x_addition = 0, 0, 0 + popt = get_func(ratios_k, start_x=0, start_ys=0) # fit the function to ratios_k + x_1_k = func_inv([r_1_k], *popt) + + step_size = step_size_l + num_steps = num_ratios - start_x + end_x_addition + start_ = x_1_k[0] + end = num_steps * step_size + # sample the function from the initial scaled r_1 with the corresponding step size + new_xs_layer_l = np.linspace(start_, end - step_size + start_, num_steps) + # print("new_xs_layer_l", new_xs_layer_l) + ratios_li = func(new_xs_layer_l, *popt) + ratios_str = ratios_to_str(ratios_li) + xs_layer_l_str = ratios_to_str(new_xs_layer_l) + print(f"layer {layer} r_1_k {r_1_k} \n new {ratios_str} \n x {xs_layer_l_str}\n") + return ratios_str + + +def read_svg(path_svg, 
multiply=0, resize_obj=False, params=None, opacity=1, device=None): + pydiffvg.set_device(device) + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(path_svg) + for group in shape_groups: + group.stroke_color = torch.tensor([0, 0, 0, opacity]) + if resize_obj and params: + w, h = params["scale_w"], params["scale_h"] + for path in shapes: + path.points = path.points / canvas_width + path.points = 2 * path.points - 1 + path.points[:, 0] /= (w) # / canvas_width) + path.points[:, 1] /= (h) # / canvas_height) + path.points = 0.5 * (path.points + 1.0) * canvas_width + center_x, center_y = canvas_width / 2, canvas_height / 2 + path.points[:, 0] += (params["original_center_x"] * canvas_width - center_x) + path.points[:, 1] += (params["original_center_y"] * canvas_height - center_y) + if multiply: + canvas_width *= 2 + canvas_height *= 2 + for path in shapes: + path.points *= 2 + path.stroke_width *= multiply + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene( + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, + device=device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3].cpu().numpy() + return img diff --git a/pytorch_svgrender/painter/clipascene/sketch_utils.py b/pytorch_svgrender/painter/clipascene/sketch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f7a6485c90ff1f788320be923db955b21ba92119 --- /dev/null +++ b/pytorch_svgrender/painter/clipascene/sketch_utils.py @@ -0,0 +1,259 @@ +import matplotlib.pyplot as plt +import numpy as np +import pydiffvg +import torch +from PIL import Image +from pytorch_svgrender.painter.clipascene import u2net_utils +from pytorch_svgrender.painter.clipasso.u2net import U2NET +from scipy import ndimage +from skimage import morphology +from skimage.measure import label +from skimage.transform import resize +from torch.utils.data import DataLoader +from torchvision import transforms +from torchvision.utils import make_grid + + +def plot_attn_dino(attn, threshold_map, inputs, inds, output_path): + # currently supports one image (and not a batch) + plt.figure(figsize=(10, 5)) + + plt.subplot(2, attn.shape[0] + 2, 1) + main_im = make_grid(inputs, normalize=True, pad_value=2) + main_im = np.transpose(main_im.cpu().numpy(), (1, 2, 0)) + plt.imshow(main_im, interpolation='nearest') + plt.scatter(inds[:, 1], inds[:, 0], s=10, c='red', marker='o') + plt.title("input im") + plt.axis("off") + + plt.subplot(2, attn.shape[0] + 2, 2) + plt.imshow(attn.sum(0).numpy(), interpolation='nearest') + plt.title("atn map sum") + plt.axis("off") + + plt.subplot(2, attn.shape[0] + 2, attn.shape[0] + 3) + plt.imshow(threshold_map[-1].numpy(), interpolation='nearest') + plt.title("prob sum") + plt.axis("off") + + plt.subplot(2, attn.shape[0] + 2, attn.shape[0] + 4) + plt.imshow(threshold_map[:-1].sum(0).numpy(), interpolation='nearest') + plt.title("thresh sum") + plt.axis("off") + + for i in range(attn.shape[0]): + plt.subplot(2, attn.shape[0] + 2, i + 3) + plt.imshow(attn[i].numpy()) + plt.axis("off") + plt.subplot(2, attn.shape[0] + 2, attn.shape[0] + 1 + i + 4) + plt.imshow(threshold_map[i].numpy()) + plt.axis("off") + plt.tight_layout() + plt.savefig(output_path) + plt.close() + + +def plot_attn_clip(attn, threshold_map, inputs, inds, output_path): + # currently 
supports one image (and not a batch) + plt.figure(figsize=(10, 5)) + + plt.subplot(1, 3, 1) + main_im = make_grid(inputs, normalize=True, pad_value=2) + main_im = np.transpose(main_im.cpu().numpy(), (1, 2, 0)) + plt.imshow(main_im, interpolation='nearest') + plt.scatter(inds[:, 1], inds[:, 0], s=10, c='red', marker='o') + plt.title("input im") + plt.axis("off") + + plt.subplot(1, 3, 2) + plt.imshow(attn, interpolation='nearest', vmin=0, vmax=1) + plt.title("attn map") + plt.axis("off") + + plt.subplot(1, 3, 3) + threshold_map_ = (threshold_map - threshold_map.min()) / \ + (threshold_map.max() - threshold_map.min()) + plt.imshow(threshold_map_, interpolation='nearest', vmin=0, vmax=1) + plt.title("prob softmax") + plt.scatter(inds[:, 1], inds[:, 0], s=10, c='red', marker='o') + plt.axis("off") + + plt.tight_layout() + plt.savefig(output_path) + plt.close() + + +def plot_attn(attn, threshold_map, inputs, inds, output_path, saliency_model): + if saliency_model == "dino": + plot_attn_dino(attn, threshold_map, inputs, inds, output_path) + elif saliency_model == "clip": + plot_attn_clip(attn, threshold_map, inputs, inds, output_path) + + +def fix_image_scale(im): + im_np = np.array(im) / 255 + height, width = im_np.shape[0], im_np.shape[1] + max_len = max(height, width) + 20 + new_background = np.ones((max_len, max_len, 3)) + y, x = max_len // 2 - height // 2, max_len // 2 - width // 2 + new_background[y: y + height, x: x + width] = im_np + new_background = (new_background / new_background.max() * 255).astype(np.uint8) + new_im = Image.fromarray(new_background) + return new_im + + +def get_size_of_largest_cc(binary_im): + labels, num = label(binary_im, background=0, return_num=True) + (unique, counts) = np.unique(labels, return_counts=True) + args = np.argsort(counts)[::-1] + largest_cc_label = unique[args][1] # without background + return counts[args][1] + + +def get_num_cc(binary_im): + labels, num = label(binary_im, background=0, return_num=True) + return num + + +def get_obj_bb(binary_im): + y = np.where(binary_im != 0)[0] + x = np.where(binary_im != 0)[1] + x0, x1, y0, y1 = x.min(), x.max(), y.min(), y.max() + return x0, x1, y0, y1 + + +def cut_and_resize(im, x0, x1, y0, y1, new_height, new_width): + cut_obj = im[y0: y1, x0: x1] + resized_obj = resize(cut_obj, (new_height, new_width)) + new_mask = np.zeros(im.shape) + center_y_new = int(new_height / 2) + center_x_new = int(new_width / 2) + center_targ_y = int(new_mask.shape[0] / 2) + center_targ_x = int(new_mask.shape[1] / 2) + startx, starty = center_targ_x - center_x_new, center_targ_y - center_y_new + new_mask[starty: starty + resized_obj.shape[0], startx: startx + resized_obj.shape[1]] = resized_obj + return new_mask + + +def get_mask_u2net(pil_im, output_dir, u2net_path, resize_obj=0, preprocess=False, device="cpu"): + w, h = pil_im.size[0], pil_im.size[1] + + test_salobj_dataset = u2net_utils.SalObjDataset(imgs_list=[pil_im], + lbl_name_list=[], + transform=transforms.Compose([u2net_utils.RescaleT(320), + u2net_utils.ToTensorLab(flag=0)])) + test_salobj_dataloader = DataLoader(test_salobj_dataset, + batch_size=1, + shuffle=False, + num_workers=1) + + input_im_trans = next(iter(test_salobj_dataloader)) + + net = U2NET(3, 1) + net.load_state_dict(torch.load(u2net_path)) + net.to(device) + net.eval() + + with torch.no_grad(): + input_im_trans = input_im_trans.type(torch.FloatTensor) + d1, d2, d3, d4, d5, d6, d7 = net(input_im_trans.cuda()) + + pred = d1[:, 0, :, :] + pred = (pred - pred.min()) / (pred.max() - pred.min()) + predict = pred 
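# Illustrative sketch (toy mask, not part of the patch): how the helpers above are
# combined for resize_obj, i.e. find the object's bounding box in a binary mask,
# resize the crop to roughly 70% of the canvas, and re-centre it.
import numpy as np
from skimage.transform import resize

mask = np.zeros((224, 224))
mask[60:120, 40:90] = 1                                   # toy foreground region
y, x = np.where(mask != 0)[0], np.where(mask != 0)[1]
x0, x1, y0, y1 = x.min(), x.max(), y.min(), y.max()       # same as get_obj_bb
obj = mask[y0:y1, x0:x1]
target_size = int(224 * 0.7)
new_h = target_size                                       # object is taller than wide here
new_w = int(target_size * (x1 - x0) / (y1 - y0))
recentred = np.zeros_like(mask)
ry, rx = (224 - new_h) // 2, (224 - new_w) // 2
recentred[ry:ry + new_h, rx:rx + new_w] = resize(obj, (new_h, new_w))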
+ + predict[predict < 0.5] = 0 + predict[predict >= 0.5] = 1 + + if preprocess: + predict = torch.tensor( + ndimage.binary_dilation(predict[0].cpu().numpy(), structure=np.ones((11, 11))).astype(int)).unsqueeze(0) + + mask = torch.cat([predict, predict, predict], axis=0).permute(1, 2, 0) + mask = mask.cpu().numpy() + max_val = mask.max() + mask[mask > max_val / 2] = 255 + mask = mask.astype(np.uint8) + mask = resize(mask, (h, w), anti_aliasing=False, order=0) + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + return mask + + mask = torch.cat([predict, predict, predict], axis=0).permute(1, 2, 0) + mask = mask.cpu().numpy() + mask = resize(mask, (h, w), anti_aliasing=False) + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + im = Image.fromarray((mask[:, :, 0] * 255).astype(np.uint8)).convert('RGB') + im.save(output_dir / "mask.png") + im_np = np.array(pil_im) + im_np = im_np / im_np.max() + + if resize_obj: + params = {} + mask_np = mask[:, :, 0].astype(int) + target_np = im_np + min_size = int(get_size_of_largest_cc(mask_np) / 3) + mask_np2 = morphology.remove_small_objects((mask_np > 0), min_size=min_size).astype(int) + num_cc = get_num_cc(mask_np2) + + mask_np3 = np.ones((h, w, 3)) + mask_np3[:, :, 0] = mask_np2 + mask_np3[:, :, 1] = mask_np2 + mask_np3[:, :, 2] = mask_np2 + + x0, x1, y0, y1 = get_obj_bb(mask_np2) + + im_width, im_height = x1 - x0, y1 - y0 + max_size = max(im_width, im_height) + target_size = int(min(h, w) * 0.7) + + if max_size < target_size and num_cc == 1: + if im_width > im_height: + new_width, new_height = target_size, int((target_size / im_width) * im_height) + else: + new_width, new_height = int((target_size / im_height) * im_width), target_size + mask = cut_and_resize(mask_np3, x0, x1, y0, y1, new_height, new_width) + target_np = target_np / target_np.max() + im_np = cut_and_resize(target_np, x0, x1, y0, y1, new_height, new_width) + + params["original_center_y"] = (y0 + (y1 - y0) / 2) / h + params["original_center_x"] = (x0 + (x1 - x0) / 2) / w + params["scale_w"] = new_width / im_width + params["scale_h"] = new_height / im_height + + np.save(output_dir / "resize_params.npy", params) + + im_np = mask * im_np + im_np[mask == 0] = 1 + im_final = (im_np / im_np.max() * 255).astype(np.uint8) + im_final = Image.fromarray(im_final) + + return im_final, mask + + +def is_in_canvas(canvas_width, canvas_height, path, device): + shapes, shape_groups = [], [] + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + shape_groups.append(path_group) + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene( + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, + device=device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3].detach().cpu().numpy() + return (1 - img).sum() diff --git a/pytorch_svgrender/painter/clipascene/u2net_utils.py b/pytorch_svgrender/painter/clipascene/u2net_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fc473919e9dc187a147e30bcb870dd440b7e0b06 --- /dev/null +++ b/pytorch_svgrender/painter/clipascene/u2net_utils.py @@ -0,0 +1,173 @@ +import numpy as np +import torch + +from skimage import io, transform, color +from 
torch.utils.data import Dataset + + +class SalObjDataset(Dataset): + def __init__(self, imgs_list, lbl_name_list, transform=None): + self.imgs_list = imgs_list + self.label_name_list = lbl_name_list + self.transform = transform + + def __len__(self): + return len(self.imgs_list) + + def __getitem__(self, idx): + + image = np.array(self.imgs_list[idx]) + imidx = np.array([idx]) + + if (0 == len(self.label_name_list)): + label_3 = np.zeros(image.shape) + else: + label_3 = io.imread(self.label_name_list[idx]) + + label = np.zeros(label_3.shape[0:2]) + if (3 == len(label_3.shape)): + label = label_3[:, :, 0] + elif (2 == len(label_3.shape)): + label = label_3 + + if (3 == len(image.shape) and 2 == len(label.shape)): + label = label[:, :, np.newaxis] + elif (2 == len(image.shape) and 2 == len(label.shape)): + image = image[:, :, np.newaxis] + label = label[:, :, np.newaxis] + + sample = {'imidx': imidx, 'image': image, 'label': label} + + if self.transform: + sample = self.transform(sample) + + return sample['image'] + + +class RescaleT(object): + + def __init__(self, output_size): + assert isinstance(output_size, (int, tuple)) + self.output_size = output_size + + def __call__(self, sample): + imidx, image, label = sample['imidx'], sample['image'], sample['label'] + + h, w = image.shape[:2] + + if isinstance(self.output_size, int): + if h > w: + new_h, new_w = self.output_size * h / w, self.output_size + else: + new_h, new_w = self.output_size, self.output_size * w / h + else: + new_h, new_w = self.output_size + + new_h, new_w = int(new_h), int(new_w) + + # #resize the image to new_h x new_w and convert image from range [0,255] to [0,1] + # img = transform.resize(image,(new_h,new_w),mode='constant') + # lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True) + + img = transform.resize(image, (self.output_size, self.output_size), mode='constant') + lbl = transform.resize(label, (self.output_size, self.output_size), mode='constant', order=0, + preserve_range=True) + + return {'imidx': imidx, 'image': img, 'label': lbl} + + +class ToTensorLab(object): + """Convert ndarrays in sample to Tensors.""" + + def __init__(self, flag=0): + self.flag = flag + + def __call__(self, sample): + + imidx, image, label = sample['imidx'], sample['image'], sample['label'] + + tmpLbl = np.zeros(label.shape) + + if (np.max(label) < 1e-6): + label = label + else: + label = label / np.max(label) + + # change the color space + if self.flag == 2: # with rgb and Lab colors + tmpImg = np.zeros((image.shape[0], image.shape[1], 6)) + tmpImgt = np.zeros((image.shape[0], image.shape[1], 3)) + if image.shape[2] == 1: + tmpImgt[:, :, 0] = image[:, :, 0] + tmpImgt[:, :, 1] = image[:, :, 0] + tmpImgt[:, :, 2] = image[:, :, 0] + else: + tmpImgt = image + tmpImgtl = color.rgb2lab(tmpImgt) + + # nomalize image to range [0,1] + tmpImg[:, :, 0] = (tmpImgt[:, :, 0] - np.min(tmpImgt[:, :, 0])) / ( + np.max(tmpImgt[:, :, 0]) - np.min(tmpImgt[:, :, 0])) + tmpImg[:, :, 1] = (tmpImgt[:, :, 1] - np.min(tmpImgt[:, :, 1])) / ( + np.max(tmpImgt[:, :, 1]) - np.min(tmpImgt[:, :, 1])) + tmpImg[:, :, 2] = (tmpImgt[:, :, 2] - np.min(tmpImgt[:, :, 2])) / ( + np.max(tmpImgt[:, :, 2]) - np.min(tmpImgt[:, :, 2])) + tmpImg[:, :, 3] = (tmpImgtl[:, :, 0] - np.min(tmpImgtl[:, :, 0])) / ( + np.max(tmpImgtl[:, :, 0]) - np.min(tmpImgtl[:, :, 0])) + tmpImg[:, :, 4] = (tmpImgtl[:, :, 1] - np.min(tmpImgtl[:, :, 1])) / ( + np.max(tmpImgtl[:, :, 1]) - np.min(tmpImgtl[:, :, 1])) + tmpImg[:, :, 5] = (tmpImgtl[:, :, 2] - 
np.min(tmpImgtl[:, :, 2])) / ( + np.max(tmpImgtl[:, :, 2]) - np.min(tmpImgtl[:, :, 2])) + + # tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg)) + + tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.mean(tmpImg[:, :, 0])) / np.std(tmpImg[:, :, 0]) + tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.mean(tmpImg[:, :, 1])) / np.std(tmpImg[:, :, 1]) + tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.mean(tmpImg[:, :, 2])) / np.std(tmpImg[:, :, 2]) + tmpImg[:, :, 3] = (tmpImg[:, :, 3] - np.mean(tmpImg[:, :, 3])) / np.std(tmpImg[:, :, 3]) + tmpImg[:, :, 4] = (tmpImg[:, :, 4] - np.mean(tmpImg[:, :, 4])) / np.std(tmpImg[:, :, 4]) + tmpImg[:, :, 5] = (tmpImg[:, :, 5] - np.mean(tmpImg[:, :, 5])) / np.std(tmpImg[:, :, 5]) + + elif self.flag == 1: # with Lab color + tmpImg = np.zeros((image.shape[0], image.shape[1], 3)) + + if image.shape[2] == 1: + tmpImg[:, :, 0] = image[:, :, 0] + tmpImg[:, :, 1] = image[:, :, 0] + tmpImg[:, :, 2] = image[:, :, 0] + else: + tmpImg = image + + tmpImg = color.rgb2lab(tmpImg) + + # tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg)) + + tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.min(tmpImg[:, :, 0])) / ( + np.max(tmpImg[:, :, 0]) - np.min(tmpImg[:, :, 0])) + tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.min(tmpImg[:, :, 1])) / ( + np.max(tmpImg[:, :, 1]) - np.min(tmpImg[:, :, 1])) + tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.min(tmpImg[:, :, 2])) / ( + np.max(tmpImg[:, :, 2]) - np.min(tmpImg[:, :, 2])) + + tmpImg[:, :, 0] = (tmpImg[:, :, 0] - np.mean(tmpImg[:, :, 0])) / np.std(tmpImg[:, :, 0]) + tmpImg[:, :, 1] = (tmpImg[:, :, 1] - np.mean(tmpImg[:, :, 1])) / np.std(tmpImg[:, :, 1]) + tmpImg[:, :, 2] = (tmpImg[:, :, 2] - np.mean(tmpImg[:, :, 2])) / np.std(tmpImg[:, :, 2]) + + else: # with rgb color + tmpImg = np.zeros((image.shape[0], image.shape[1], 3)) + image = image / np.max(image) + if image.shape[2] == 1: + tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229 + tmpImg[:, :, 1] = (image[:, :, 0] - 0.485) / 0.229 + tmpImg[:, :, 2] = (image[:, :, 0] - 0.485) / 0.229 + else: + tmpImg[:, :, 0] = (image[:, :, 0] - 0.485) / 0.229 + tmpImg[:, :, 1] = (image[:, :, 1] - 0.456) / 0.224 + tmpImg[:, :, 2] = (image[:, :, 2] - 0.406) / 0.225 + + tmpLbl[:, :, 0] = label[:, :, 0] + + tmpImg = tmpImg.transpose((2, 0, 1)) + tmpLbl = label.transpose((2, 0, 1)) + + return {'imidx': torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl)} diff --git a/pytorch_svgrender/painter/clipasso/__init__.py b/pytorch_svgrender/painter/clipasso/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a9f2ba3c169ab89442d58f0c08e77a22a8ab4a34 --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .painter_params import Painter, PainterOptimizer +from .loss import Loss + +__all__ = [ + 'Painter', 'PainterOptimizer', + 'Loss' +] diff --git a/pytorch_svgrender/painter/clipasso/grad_cam.py b/pytorch_svgrender/painter/clipasso/grad_cam.py new file mode 100644 index 0000000000000000000000000000000000000000..e2d928fdfdf7392139be9047c9b777836bf4d35a --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/grad_cam.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# Reference: https://arxiv.org/abs/1610.02391 +def gradCAM( + model: nn.Module, + input: torch.Tensor, + target: torch.Tensor, + layer: nn.Module +) -> torch.Tensor: + # Zero out any gradients at the input. + if input.grad is not None: + input.grad.data.zero_() + + # Disable gradient settings. + requires_grad = {} + for name, param in model.named_parameters(): + requires_grad[name] = param.requires_grad + param.requires_grad_(False) + + # Attach a hook to the model at the desired layer. + assert isinstance(layer, nn.Module) + with Hook(layer) as hook: + # Do a forward and backward pass. + output = model(input) + output.backward(target) + + grad = hook.gradient.float() + act = hook.activation.float() + + # Global average pool gradient across spatial dimension + # to obtain importance weights. + alpha = grad.mean(dim=(2, 3), keepdim=True) + # Weighted combination of activation maps over channel + # dimension. + gradcam = torch.sum(act * alpha, dim=1, keepdim=True) + # We only want neurons with positive influence so we + # clamp any negative ones. + gradcam = torch.clamp(gradcam, min=0) + + # Resize gradcam to input resolution. + gradcam = F.interpolate(gradcam, input.shape[2:], mode='bicubic', align_corners=False) + + # Restore gradient settings. + for name, param in model.named_parameters(): + param.requires_grad_(requires_grad[name]) + + return gradcam + + +class Hook: + """Attaches to a module and records its activations and gradients.""" + + def __init__(self, module: nn.Module): + self.data = None + self.hook = module.register_forward_hook(self.save_grad) + + def save_grad(self, module, input, output): + self.data = output + output.requires_grad_(True) + output.retain_grad() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + self.hook.remove() + + @property + def activation(self) -> torch.Tensor: + return self.data + + @property + def gradient(self) -> torch.Tensor: + return self.data.grad diff --git a/pytorch_svgrender/painter/clipasso/loss.py b/pytorch_svgrender/painter/clipasso/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..499e23af38c9bcb0b3e7c01f1d150548fa6e2895 --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/loss.py @@ -0,0 +1,453 @@ +import collections + +import torch +import torch.nn as nn +from torchvision import models, transforms + +from . 
import modified_clip as clip + + +class Loss(nn.Module): + def __init__(self, args, device): + super(Loss, self).__init__() + self.args = args + self.device = device + + self.percep_loss = args.percep_loss + self.train_with_clip = args.train_with_clip + self.clip_weight = args.clip_weight + self.start_clip = args.start_clip + + self.args.clip_conv_layer_weights = [ + float(item) for item in args.clip_conv_layer_weights.split(',') + ] + self.clip_conv_loss = args.clip_conv_loss + self.clip_fc_loss_weight = args.clip_fc_loss_weight + self.clip_text_guide = args.clip_text_guide + + self.losses_to_apply = self.get_losses_to_apply() + + self.loss_mapper = \ + { + "clip": CLIPLoss(args, device), + "clip_conv_loss": CLIPConvLoss(args, device) + } + + def get_losses_to_apply(self): + losses_to_apply = [] + if self.percep_loss != "none": + losses_to_apply.append(self.percep_loss) + if self.train_with_clip and self.start_clip == 0: + losses_to_apply.append("clip") + if self.clip_conv_loss: + losses_to_apply.append("clip_conv_loss") + if self.clip_text_guide: + losses_to_apply.append("clip_text") + return losses_to_apply + + def update_losses_to_apply(self, epoch): + if "clip" not in self.losses_to_apply: + if self.train_with_clip: + if epoch > self.start_clip: + self.losses_to_apply.append("clip") + + def forward(self, sketches, targets, color_parameters, renderer, epoch, points_optim=None, mode="train"): + loss = 0 + self.update_losses_to_apply(epoch) + + losses_dict = dict.fromkeys(self.losses_to_apply, torch.tensor([0.0]).to(self.device)) + loss_coeffs = dict.fromkeys(self.losses_to_apply, 1.0) + loss_coeffs["clip"] = self.clip_weight + loss_coeffs["clip_text"] = self.clip_text_guide + + for loss_name in self.losses_to_apply: + if loss_name in ["clip_conv_loss"]: + conv_loss = self.loss_mapper[loss_name](sketches, targets, mode) + for layer in conv_loss.keys(): + losses_dict[layer] = conv_loss[layer] + elif loss_name == "l2": + losses_dict[loss_name] = self.loss_mapper[loss_name](sketches, targets).mean() + else: + losses_dict[loss_name] = self.loss_mapper[loss_name](sketches, targets, mode).mean() + + for key in self.losses_to_apply: + losses_dict[key] = losses_dict[key] * loss_coeffs[key] + # print(losses_dict) + return losses_dict + + +class CLIPLoss(torch.nn.Module): + def __init__(self, args, device): + super(CLIPLoss, self).__init__() + + self.args = args + self.device = device + self.model, clip_preprocess = clip.load('ViT-B/32', self.device, jit=False) + self.model.eval() + self.preprocess = transforms.Compose([clip_preprocess.transforms[-1]]) # clip normalisation + self.NUM_AUGS = args.num_aug_clip + augemntations = [] + if "affine" in args.augemntations: + augemntations.append( + transforms.RandomPerspective(fill=0, p=1.0, distortion_scale=0.5) + ) + augemntations.append( + transforms.RandomResizedCrop(224, scale=(0.8, 0.8), ratio=(1.0, 1.0)) + ) + augemntations.append( + transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) + ) + self.augment_trans = transforms.Compose(augemntations) + + self.calc_target = True + self.include_target_in_aug = args.include_target_in_aug + self.counter = 0 + self.augment_both = args.augment_both + + def forward(self, sketches, targets, mode="train"): + if self.calc_target: + targets_ = self.preprocess(targets).to(self.device) + self.targets_features = self.model.encode_image(targets_).detach() + self.calc_target = False + + if mode == "eval": + # for regular clip distance, no augmentations + with torch.no_grad(): 
+ sketches = self.preprocess(sketches).to(self.device) + sketches_features = self.model.encode_image(sketches) + return 1. - torch.cosine_similarity(sketches_features, self.targets_features) + + loss_clip = 0 + sketch_augs = [] + img_augs = [] + for n in range(self.NUM_AUGS): + augmented_pair = self.augment_trans(torch.cat([sketches, targets])) + sketch_augs.append(augmented_pair[0].unsqueeze(0)) + + sketch_batch = torch.cat(sketch_augs) + + sketch_features = self.model.encode_image(sketch_batch) + + for n in range(self.NUM_AUGS): + loss_clip += (1. - torch.cosine_similarity(sketch_features[n:n + 1], self.targets_features, dim=1)) + self.counter += 1 + return loss_clip + + +class LPIPS(torch.nn.Module): + def __init__(self, pretrained=True, normalize=True, pre_relu=True, device=None): + """ + Args: + pre_relu(bool): if True, selects features **before** reLU activations + """ + super(LPIPS, self).__init__() + # VGG using perceptually-learned weights (LPIPS metric) + self.normalize = normalize + self.pretrained = pretrained + augemntations = [] + augemntations.append(transforms.RandomPerspective(fill=0, p=1.0, distortion_scale=0.5)) + augemntations.append(transforms.RandomResizedCrop(224, scale=(0.8, 0.8), ratio=(1.0, 1.0))) + self.augment_trans = transforms.Compose(augemntations) + self.feature_extractor = LPIPS._FeatureExtractor(pretrained, pre_relu).to(device) + + def _l2_normalize_features(self, x, eps=1e-10): + nrm = torch.sqrt(torch.sum(x * x, dim=1, keepdim=True)) + return x / (nrm + eps) + + def forward(self, pred, target, mode="train"): + """Compare VGG features of two inputs.""" + + # Get VGG features + + sketch_augs, img_augs = [pred], [target] + if mode == "train": + for n in range(4): + augmented_pair = self.augment_trans(torch.cat([pred, target])) + sketch_augs.append(augmented_pair[0].unsqueeze(0)) + img_augs.append(augmented_pair[1].unsqueeze(0)) + + xs = torch.cat(sketch_augs, dim=0) + ys = torch.cat(img_augs, dim=0) + + pred = self.feature_extractor(xs) + target = self.feature_extractor(ys) + + # L2 normalize features + if self.normalize: + pred = [self._l2_normalize_features(f) for f in pred] + target = [self._l2_normalize_features(f) for f in target] + + # TODO(mgharbi) Apply Richard's linear weights? 
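+        # with no learned linear weights, every VGG stage contributes equally to the final distance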
+ + if self.normalize: + diffs = [torch.sum((p - t) ** 2, 1) for (p, t) in zip(pred, target)] + else: + # mean instead of sum to avoid super high range + diffs = [torch.mean((p - t) ** 2, 1) for (p, t) in zip(pred, target)] + + # Spatial average + diffs = [diff.mean([1, 2]) for diff in diffs] + + return sum(diffs) + + class _FeatureExtractor(torch.nn.Module): + def __init__(self, pretrained, pre_relu): + super(LPIPS._FeatureExtractor, self).__init__() + vgg_pretrained = models.vgg16(pretrained=pretrained).features + + self.breakpoints = [0, 4, 9, 16, 23, 30] + if pre_relu: + for i, _ in enumerate(self.breakpoints[1:]): + self.breakpoints[i + 1] -= 1 + + # Split at the maxpools + for i, b in enumerate(self.breakpoints[:-1]): + ops = torch.nn.Sequential() + for idx in range(b, self.breakpoints[i + 1]): + op = vgg_pretrained[idx] + ops.add_module(str(idx), op) + # print(ops) + self.add_module("group{}".format(i), ops) + + # No gradients + for p in self.parameters(): + p.requires_grad = False + + # Torchvision's normalization: <https://github.com/pytorch/examples/blob/42e5b996718797e45c46a25c55b031e6768f8440/imagenet/main.py#L89-L101> + self.register_buffer("shift", torch.Tensor( + [0.485, 0.456, 0.406]).view(1, 3, 1, 1)) + self.register_buffer("scale", torch.Tensor( + [0.229, 0.224, 0.225]).view(1, 3, 1, 1)) + + def forward(self, x): + feats = [] + x = (x - self.shift) / self.scale + for idx in range(len(self.breakpoints) - 1): + m = getattr(self, "group{}".format(idx)) + x = m(x) + feats.append(x) + return feats + + +class L2_(torch.nn.Module): + def __init__(self): + """ + Args: + pre_relu(bool): if True, selects features **before** reLU activations + """ + super(L2_, self).__init__() + # VGG using perceptually-learned weights (LPIPS metric) + augemntations = [] + augemntations.append(transforms.RandomPerspective( + fill=0, p=1.0, distortion_scale=0.5)) + augemntations.append(transforms.RandomResizedCrop( + 224, scale=(0.8, 0.8), ratio=(1.0, 1.0))) + augemntations.append( + transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))) + self.augment_trans = transforms.Compose(augemntations) + + def forward(self, pred, target, mode="train"): + """Compare VGG features of two inputs.""" + + # Get VGG features + + sketch_augs, img_augs = [pred], [target] + if mode == "train": + for n in range(4): + augmented_pair = self.augment_trans(torch.cat([pred, target])) + sketch_augs.append(augmented_pair[0].unsqueeze(0)) + img_augs.append(augmented_pair[1].unsqueeze(0)) + + pred = torch.cat(sketch_augs, dim=0) + target = torch.cat(img_augs, dim=0) + diffs = [torch.square(p - t).mean() for (p, t) in zip(pred, target)] + return sum(diffs) + + +class CLIPVisualEncoder(nn.Module): + def __init__(self, clip_model): + super().__init__() + self.clip_model = clip_model + self.featuremaps = None + + for i in range(12): # 12 resblocks in VIT visual transformer + self.clip_model.visual.transformer.resblocks[i].register_forward_hook( + self.make_hook(i) + ) + + def make_hook(self, name): + def hook(module, input, output): + if len(output.shape) == 3: + self.featuremaps[name] = output.permute( + 1, 0, 2) # LND -> NLD bs, smth, 768 + else: + self.featuremaps[name] = output + + return hook + + def forward(self, x): + self.featuremaps = collections.OrderedDict() + fc_features = self.clip_model.encode_image(x).float() + featuremaps = [self.featuremaps[k] for k in range(12)] + + return fc_features, featuremaps + + +def l2_layers(xs_conv_features, ys_conv_features, clip_model_name): + 
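+    # one value per CLIP layer: mean squared difference between sketch and target feature maps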
return [torch.square(x_conv - y_conv).mean() for x_conv, y_conv in + zip(xs_conv_features, ys_conv_features)] + + +def l1_layers(xs_conv_features, ys_conv_features, clip_model_name): + return [torch.abs(x_conv - y_conv).mean() for x_conv, y_conv in + zip(xs_conv_features, ys_conv_features)] + + +def cos_layers(xs_conv_features, ys_conv_features, clip_model_name): + if "RN" in clip_model_name: + return [torch.square(x_conv, y_conv, dim=1).mean() for x_conv, y_conv in + zip(xs_conv_features, ys_conv_features)] + return [(1 - torch.cosine_similarity(x_conv, y_conv, dim=1)).mean() for x_conv, y_conv in + zip(xs_conv_features, ys_conv_features)] + + +class CLIPConvLoss(torch.nn.Module): + def __init__(self, args, device): + super(CLIPConvLoss, self).__init__() + self.args = args + self.device = device + self.clip_model_name = args.clip_model_name + assert self.clip_model_name in [ + "RN50", + "RN101", + "RN50x4", + "RN50x16", + "ViT-B/32", + "ViT-B/16", + ] + + self.clip_conv_loss_type = args.clip_conv_loss_type + self.clip_fc_loss_type = "Cos" # args.clip_fc_loss_type + assert self.clip_conv_loss_type in [ + "L2", "Cos", "L1", + ] + assert self.clip_fc_loss_type in [ + "L2", "Cos", "L1", + ] + + self.distance_metrics = \ + { + "L2": l2_layers, + "L1": l1_layers, + "Cos": cos_layers + } + + self.model, clip_preprocess = clip.load(self.clip_model_name, self.device, jit=False) + + if self.clip_model_name.startswith("ViT"): + self.visual_encoder = CLIPVisualEncoder(self.model) + + else: + self.visual_model = self.model.visual + layers = list(self.model.visual.children()) + init_layers = torch.nn.Sequential(*layers)[:8] + self.layer1 = layers[8] + self.layer2 = layers[9] + self.layer3 = layers[10] + self.layer4 = layers[11] + self.att_pool2d = layers[12] + + self.img_size = clip_preprocess.transforms[1].size + self.model.eval() + self.target_transform = transforms.Compose([ + transforms.ToTensor(), + ]) # clip normalisation + self.normalize_transform = transforms.Compose([ + clip_preprocess.transforms[0], # Resize + clip_preprocess.transforms[1], # CenterCrop + clip_preprocess.transforms[-1], # Normalize + ]) + + self.model.eval() + self.num_augs = self.args.num_aug_clip + + augemntations = [] + if "affine" in args.augemntations: + augemntations.append(transforms.RandomPerspective(fill=0, p=1.0, distortion_scale=0.5)) + augemntations.append(transforms.RandomResizedCrop(224, scale=(0.8, 0.8), ratio=(1.0, 1.0))) + augemntations.append( + transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) + ) + self.augment_trans = transforms.Compose(augemntations) + + self.clip_fc_layer_dims = None # self.args.clip_fc_layer_dims + self.clip_conv_layer_dims = None # self.args.clip_conv_layer_dims + self.clip_fc_loss_weight = args.clip_fc_loss_weight + self.counter = 0 + + def forward(self, sketch, target, mode="train"): + """ + Parameters + ---------- + sketch: Torch Tensor [1, C, H, W] + target: Torch Tensor [1, C, H, W] + """ + # y = self.target_transform(target).to(self.args.device) + conv_loss_dict = {} + x = sketch.to(self.device) + y = target.to(self.device) + sketch_augs, img_augs = [self.normalize_transform(x)], [self.normalize_transform(y)] + if mode == "train": + for n in range(self.num_augs): + augmented_pair = self.augment_trans(torch.cat([x, y])) + sketch_augs.append(augmented_pair[0].unsqueeze(0)) + img_augs.append(augmented_pair[1].unsqueeze(0)) + + xs = torch.cat(sketch_augs, dim=0).to(self.device) + ys = torch.cat(img_augs, dim=0).to(self.device) + + if 
self.clip_model_name.startswith("RN"): + xs_fc_features, xs_conv_features = self.forward_inspection_clip_resnet( + xs.contiguous() + ) + ys_fc_features, ys_conv_features = self.forward_inspection_clip_resnet( + ys.detach() + ) + + else: + xs_fc_features, xs_conv_features = self.visual_encoder(xs) + ys_fc_features, ys_conv_features = self.visual_encoder(ys) + + conv_loss = self.distance_metrics[self.clip_conv_loss_type]( + xs_conv_features, ys_conv_features, self.clip_model_name + ) + + for layer, w in enumerate(self.args.clip_conv_layer_weights): + if w: + # layer_ = torch.tensor(layer, dtype=torch.long, device=self.device) + # print(layer_) + conv_loss_dict[f"clip_conv_loss_layer{layer}"] = conv_loss[layer] * w + + if self.clip_fc_loss_weight: + # fc distance is always cos + fc_loss = (1 - torch.cosine_similarity(xs_fc_features, ys_fc_features, dim=1)).mean() + conv_loss_dict["fc"] = fc_loss * self.clip_fc_loss_weight + + self.counter += 1 + return conv_loss_dict + + def forward_inspection_clip_resnet(self, x): + def stem(m, x): + for conv, bn in [(m.conv1, m.bn1), (m.conv2, m.bn2), (m.conv3, m.bn3)]: + x = m.relu(bn(conv(x))) + x = m.avgpool(x) + return x + + x = x.type(self.visual_model.conv1.weight.dtype) + x = stem(self.visual_model, x) + x1 = self.layer1(x) + x2 = self.layer2(x1) + x3 = self.layer3(x2) + x4 = self.layer4(x3) + y = self.att_pool2d(x4) + return y, [x, x1, x2, x3, x4] diff --git a/pytorch_svgrender/painter/clipasso/modified_clip/__init__.py b/pytorch_svgrender/painter/clipasso/modified_clip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dcc5619538c0f7c782508bdbd9587259d805e0d9 --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/modified_clip/__init__.py @@ -0,0 +1 @@ +from .clip import * diff --git a/pytorch_svgrender/painter/clipasso/modified_clip/auxilary.py b/pytorch_svgrender/painter/clipasso/modified_clip/auxilary.py new file mode 100644 index 0000000000000000000000000000000000000000..7bed3c1acee7285111739e179acbc229855fdbad --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/modified_clip/auxilary.py @@ -0,0 +1,427 @@ +import warnings +from typing import Tuple, Optional + +import torch +from torch import Tensor +from torch.nn.init import xavier_uniform_ +from torch.nn.init import constant_ +from torch.nn.init import xavier_normal_ +from torch.nn.parameter import Parameter +from torch.nn import functional as F + +# We define this function as _pad because it takes an argument +# named pad, which clobbers the recursive reference to the pad +# function needed for __torch_function__ support +pad = F.pad + + +# This class exists solely for Transformer; it has an annotation stating +# that bias is never None, which appeases TorchScript +class _LinearWithBias(torch.nn.Linear): + bias: Tensor + + def __init__(self, in_features: int, out_features: int) -> None: + super().__init__(in_features, out_features, bias=True) + + +def multi_head_attention_forward(query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Optional[Tensor], + in_proj_bias: Optional[Tensor], + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + 
v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + attention_probs_forward_hook=None, + attention_probs_backwards_hook=None, + ) -> Tuple[Tensor, Optional[Tensor]]: + if not torch.jit.is_scripting(): + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, + out_proj_weight, out_proj_bias) + if any([type(t) is not Tensor for t in tens_ops]) and F.has_torch_function(tens_ops): + return F.handle_torch_function( + multi_head_attention_forward, tens_ops, query, key, value, + embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, + bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, + out_proj_bias, training=training, key_padding_mask=key_padding_mask, + need_weights=need_weights, attn_mask=attn_mask, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v) + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + # allow MHA to have different sizes for the feature dimension + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if not use_separate_proj_weight: + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = F.linear(query, _w, _b) + + if key is None: + assert value is None + k = None + v = None + else: + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = F.linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = F.linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = F.linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = F.linear(value, _w, _b) + else: + q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight) + len1, len2 = q_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == query.size(-1) + + k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight) + len1, len2 = k_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == key.size(-1) + + v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight) + len1, len2 = v_proj_weight_non_opt.size() + assert len1 == embed_dim and len2 == value.size(-1) + + if in_proj_bias is not None: + q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim]) + k = F.linear(key, 
k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)]) + v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):]) + else: + q = F.linear(query, q_proj_weight_non_opt, in_proj_bias) + k = F.linear(key, k_proj_weight_non_opt, in_proj_bias) + v = F.linear(value, v_proj_weight_non_opt, in_proj_bias) + q = q * scaling + + if attn_mask is not None: + assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \ + attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \ + 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype) + if attn_mask.dtype == torch.uint8: + warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 2D attn_mask is not correct.') + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]: + raise RuntimeError('The size of the 3D attn_mask is not correct.') + else: + raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim())) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8: + warnings.warn( + "Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.") + key_padding_mask = key_padding_mask.to(torch.bool) + + if bias_k is not None and bias_v is not None: + if static_k is None and static_v is None: + k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
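+    # no learnable bias_k / bias_v was provided, so nothing is appended to the key/value sequences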
+ else: + assert bias_k is None + assert bias_v is None + + q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if k is not None: + k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + if v is not None: + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + if static_k is not None: + assert static_k.size(0) == bsz * num_heads + assert static_k.size(2) == head_dim + k = static_k + + if static_v is not None: + assert static_v.size(0) == bsz * num_heads + assert static_v.size(2) == head_dim + v = static_v + + src_len = k.size(1) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz + assert key_padding_mask.size(1) == src_len + + if add_zero_attn: + src_len += 1 + k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + attn_output_weights = torch.bmm(q, k.transpose(1, 2)) + assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float('-inf')) + else: + attn_output_weights += attn_mask + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float('-inf'), + ) + attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len) + + attn_output_weights = F.softmax(attn_output_weights, dim=-1) + attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training) + + # use hooks for the attention weights if necessary + if attention_probs_forward_hook is not None and attention_probs_backwards_hook is not None: + attention_probs_forward_hook(attn_output_weights) + attn_output_weights.register_hook(attention_probs_backwards_hook) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) + attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias) + + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None + + +class MultiheadAttention(torch.nn.Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. + See reference: Attention Is All You Need + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + \text{where} head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V) + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + bias: add bias as module parameter. Default: True. + add_bias_kv: add bias to the key and value sequences at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + kdim: total number of features in key. Default: None. + vdim: total number of features in value. Default: None. 
+ + Note: if kdim and vdim are None, they will be set to embed_dim such that + query, key, and value have the same number of features. + + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, + vdim=None): + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim + + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim)) + self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim)) + self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim)) + self.register_parameter('in_proj_weight', None) + else: + self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim)) + self.register_parameter('q_proj_weight', None) + self.register_parameter('k_proj_weight', None) + self.register_parameter('v_proj_weight', None) + + if bias: + self.in_proj_bias = Parameter(torch.empty(3 * embed_dim)) + else: + self.register_parameter('in_proj_bias', None) + self.out_proj = _LinearWithBias(embed_dim, embed_dim) + + if add_bias_kv: + self.bias_k = Parameter(torch.empty(1, 1, embed_dim)) + self.bias_v = Parameter(torch.empty(1, 1, embed_dim)) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self._qkv_same_embed_dim: + xavier_uniform_(self.in_proj_weight) + else: + xavier_uniform_(self.q_proj_weight) + xavier_uniform_(self.k_proj_weight) + xavier_uniform_(self.v_proj_weight) + + if self.in_proj_bias is not None: + constant_(self.in_proj_bias, 0.) + constant_(self.out_proj.bias, 0.) + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if '_qkv_same_embed_dim' not in state: + state['_qkv_same_embed_dim'] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward(self, query, key, value, key_padding_mask=None, + need_weights=True, attn_mask=None, + attention_probs_forward_hook=None, + attention_probs_backwards_hook=None): + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. 
+ attention_probs_forward_hook: + attention_probs_backwards_hook: + + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + if not self._qkv_same_embed_dim: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, use_separate_proj_weight=True, + q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight, + v_proj_weight=self.v_proj_weight, + attention_probs_forward_hook=attention_probs_forward_hook, + attention_probs_backwards_hook=attention_probs_backwards_hook) + else: + return multi_head_attention_forward( + query, key, value, self.embed_dim, self.num_heads, + self.in_proj_weight, self.in_proj_bias, + self.bias_k, self.bias_v, self.add_zero_attn, + self.dropout, self.out_proj.weight, self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, need_weights=need_weights, + attn_mask=attn_mask, + attention_probs_forward_hook=attention_probs_forward_hook, + attention_probs_backwards_hook=attention_probs_backwards_hook) diff --git a/pytorch_svgrender/painter/clipasso/modified_clip/bpe_simple_vocab_16e6.txt.gz b/pytorch_svgrender/painter/clipasso/modified_clip/bpe_simple_vocab_16e6.txt.gz new file mode 100644 index 0000000000000000000000000000000000000000..36a15856e00a06a9fbed8cdd34d2393fea4a3113 --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/modified_clip/bpe_simple_vocab_16e6.txt.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a +size 1356917 diff --git a/pytorch_svgrender/painter/clipasso/modified_clip/clip.py 
b/pytorch_svgrender/painter/clipasso/modified_clip/clip.py new file mode 100644 index 0000000000000000000000000000000000000000..76f241b053e3a6da06b1165e73e0d54c5b5356b2 --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/modified_clip/clip.py @@ -0,0 +1,193 @@ +import hashlib +import os +import urllib +import warnings +from typing import Union, List + +import torch +from PIL import Image +from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize +from tqdm import tqdm + +from .model import build_model +from .simple_tokenizer import SimpleTokenizer as _Tokenizer + +__all__ = ["available_models", "load", "tokenize"] +_tokenizer = _Tokenizer() + +_MODELS = { + "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", + "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", + "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", + "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", +} + + +def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")): + os.makedirs(root, exist_ok=True) + filename = os.path.basename(url) + + expected_sha256 = url.split("/")[-2] + download_target = os.path.join(root, filename) + + if os.path.exists(download_target) and not os.path.isfile(download_target): + raise RuntimeError(f"{download_target} exists and is not a regular file") + + if os.path.isfile(download_target): + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: + return download_target + else: + warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") + + with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: + with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop: + while True: + buffer = source.read(8192) + if not buffer: + break + + output.write(buffer) + loop.update(len(buffer)) + + if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: + raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match") + + return download_target + + +def _transform(n_px): + return Compose([ + Resize(n_px, interpolation=Image.BICUBIC), + CenterCrop(n_px), + lambda image: image.convert("RGB"), + ToTensor(), + Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), + ]) + + +def available_models() -> List[str]: + """Returns the names of available CLIP models""" + return list(_MODELS.keys()) + + +def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True): + """Load a CLIP model + + Parameters + ---------- + name : str + A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict + + device : Union[str, torch.device] + The device to put the loaded model + + jit : bool + Whether to load the optimized JIT model (default) or more hackable non-JIT model. 
+ + Returns + ------- + model : torch.nn.Module + The CLIP model + + preprocess : Callable[[PIL.Image], torch.Tensor] + A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input + """ + if name in _MODELS: + model_path = _download(_MODELS[name]) + elif os.path.isfile(name): + model_path = name + else: + raise RuntimeError(f"Model {name} not found; available models = {available_models()}") + + try: + # loading JIT archive + model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() + state_dict = None + except RuntimeError: + # loading saved state dict + if jit: + warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead") + jit = False + state_dict = torch.load(model_path, map_location="cpu") + + if not jit: + model = build_model(state_dict or model.state_dict()).to(device) + if str(device) == "cpu": + model.float() + return model, _transform(model.visual.input_resolution) + + # patch the device names + device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) + device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] + + def patch_device(module): + graphs = [module.graph] if hasattr(module, "graph") else [] + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("prim::Constant"): + if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): + node.copyAttributes(device_node) + + model.apply(patch_device) + patch_device(model.encode_image) + patch_device(model.encode_text) + + # patch dtype to float32 on CPU + if str(device) == "cpu": + float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) + float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] + float_node = float_input.node() + + def patch_float(module): + graphs = [module.graph] if hasattr(module, "graph") else [] + if hasattr(module, "forward1"): + graphs.append(module.forward1.graph) + + for graph in graphs: + for node in graph.findAllNodes("aten::to"): + inputs = list(node.inputs()) + for i in [1, 2]: # dtype can be the second or third argument to aten::to() + if inputs[i].node()["value"] == 5: + inputs[i].node().copyAttributes(float_node) + + model.apply(patch_float) + patch_float(model.encode_image) + patch_float(model.encode_text) + + model.float() + + return model, _transform(model.input_resolution.item()) + + +def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor: + """ + Returns the tokenized representation of given input string(s) + + Parameters + ---------- + texts : Union[str, List[str]] + An input string or a list of input strings to tokenize + + context_length : int + The context length to use; all CLIP models use 77 as the context length + + Returns + ------- + A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] + """ + if isinstance(texts, str): + texts = [texts] + + sot_token = _tokenizer.encoder["<|startoftext|>"] + eot_token = _tokenizer.encoder["<|endoftext|>"] + all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] + result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + + for i, tokens in enumerate(all_tokens): + if len(tokens) > context_length: + raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}") + result[i, 
:len(tokens)] = torch.tensor(tokens) + + return result diff --git a/pytorch_svgrender/painter/clipasso/modified_clip/example.py b/pytorch_svgrender/painter/clipasso/modified_clip/example.py new file mode 100644 index 0000000000000000000000000000000000000000..efe6c6f028dfd687f8d9da1a354362274f522363 --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/modified_clip/example.py @@ -0,0 +1,94 @@ +import torch +import clip +from PIL import Image +import numpy as np +import cv2 +import matplotlib.pyplot as plt + +def interpret(image, text, model, device, index=None): + logits_per_image, logits_per_text = model(image, text) + probs = logits_per_image.softmax(dim=-1).detach().cpu().numpy() + if index is None: + index = np.argmax(logits_per_image.cpu().data.numpy(), axis=-1) + one_hot = np.zeros((1, logits_per_image.size()[-1]), dtype=np.float32) + one_hot[0, index] = 1 + one_hot = torch.from_numpy(one_hot).requires_grad_(True) + one_hot = torch.sum(one_hot.cuda() * logits_per_image) + model.zero_grad() + one_hot.backward(retain_graph=True) + + image_attn_blocks = list(dict(model.visual.transformer.resblocks.named_children()).values()) + num_tokens = image_attn_blocks[0].attn_probs.shape[-1] + R = torch.eye(num_tokens, num_tokens, dtype=image_attn_blocks[0].attn_probs.dtype).to(device) + for blk in image_attn_blocks: + grad = blk.attn_grad + cam = blk.attn_probs + cam = cam.reshape(-1, cam.shape[-1], cam.shape[-1]) + grad = grad.reshape(-1, grad.shape[-1], grad.shape[-1]) + cam = grad * cam + cam = cam.clamp(min=0).mean(dim=0) + R += torch.matmul(cam, R) + R[0, 0] = 0 + image_relevance = R[0, 1:] + + # create heatmap from mask on image + def show_cam_on_image(img, mask): + heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET) + heatmap = np.float32(heatmap) / 255 + cam = heatmap + np.float32(img) + cam = cam / np.max(cam) + return cam + + image_relevance = image_relevance.reshape(1, 1, 7, 7) + image_relevance = torch.nn.functional.interpolate(image_relevance, size=224, mode='bilinear') + image_relevance = image_relevance.reshape(224, 224).cuda().data.cpu().numpy() + image_relevance = (image_relevance - image_relevance.min()) / (image_relevance.max() - image_relevance.min()) + image = image[0].permute(1, 2, 0).data.cpu().numpy() + image = (image - image.min()) / (image.max() - image.min()) + vis = show_cam_on_image(image, image_relevance) + vis = np.uint8(255 * vis) + vis = cv2.cvtColor(np.array(vis), cv2.COLOR_RGB2BGR) + + plt.imshow(vis) + plt.show() + + print("Label probs:", probs) + +def main(): + device = "cuda" if torch.cuda.is_available() else "cpu" + model, preprocess = clip.load("ViT-B/32", device=device, jit=False) + + image = preprocess(Image.open("catdog.png")).unsqueeze(0).to(device) + text = clip.tokenize(["a dog", "a cat"]).to(device) + interpret(model=model, image=image, text=text, device=device, index=0) + interpret(model=model, image=image, text=text, device=device, index=1) + + image = preprocess(Image.open("el1.png")).unsqueeze(0).to(device) + text = clip.tokenize(["an elephant", "a zebra"]).to(device) + interpret(model=model, image=image, text=text, device=device, index=0) + interpret(model=model, image=image, text=text, device=device, index=1) + + image = preprocess(Image.open("el2.png")).unsqueeze(0).to(device) + text = clip.tokenize(["an elephant", "a zebra"]).to(device) + interpret(model=model, image=image, text=text, device=device, index=0) + interpret(model=model, image=image, text=text, device=device, index=1) + + image = 
preprocess(Image.open("el3.png")).unsqueeze(0).to(device) + text = clip.tokenize(["an elephant", "a zebra"]).to(device) + interpret(model=model, image=image, text=text, device=device, index=0) + interpret(model=model, image=image, text=text, device=device, index=1) + + image = preprocess(Image.open("el4.png")).unsqueeze(0).to(device) + text = clip.tokenize(["an elephant", "a zebra"]).to(device) + interpret(model=model, image=image, text=text, device=device, index=0) + interpret(model=model, image=image, text=text, device=device, index=1) + + image = preprocess(Image.open("dogbird.png")).unsqueeze(0).to(device) + text = clip.tokenize(["a basset hound", "a parrot"]).to(device) + interpret(model=model, image=image, text=text, device=device, index=0) + interpret(model=model, image=image, text=text, device=device, index=1) + + +if __name__ == "__main__": + main() + diff --git a/pytorch_svgrender/painter/clipasso/modified_clip/model.py b/pytorch_svgrender/painter/clipasso/modified_clip/model.py new file mode 100644 index 0000000000000000000000000000000000000000..9a1175c995cc9233670522ef24ea37d42c09c901 --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/modified_clip/model.py @@ -0,0 +1,451 @@ +from collections import OrderedDict +from typing import Union, Tuple + +import numpy as np +import torch +from torch import nn +from .auxilary import MultiheadAttention, multi_head_attention_forward + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1): + super().__init__() + + # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1 + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + + self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() + + self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + + self.relu = nn.ReLU(inplace=True) + self.downsample = None + self.stride = stride + + if stride > 1 or inplanes != planes * Bottleneck.expansion: + # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1 + self.downsample = nn.Sequential(OrderedDict([ + ("-1", nn.AvgPool2d(stride)), + ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)), + ("1", nn.BatchNorm2d(planes * self.expansion)) + ])) + + def forward(self, x: torch.Tensor): + identity = x + + out = self.relu(self.bn1(self.conv1(x))) + out = self.relu(self.bn2(self.conv2(out))) + out = self.avgpool(out) + out = self.bn3(self.conv3(out)) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + return out + + +class AttentionPool2d(nn.Module): + + def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): + super().__init__() + self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) + self.k_proj = nn.Linear(embed_dim, embed_dim) + self.q_proj = nn.Linear(embed_dim, embed_dim) + self.v_proj = nn.Linear(embed_dim, embed_dim) + self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) + self.num_heads = num_heads + + def forward(self, x): + x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC + x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC + x = x + 
self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC + x, _ = multi_head_attention_forward( + query=x, key=x, value=x, + embed_dim_to_check=x.shape[-1], + num_heads=self.num_heads, + q_proj_weight=self.q_proj.weight, + k_proj_weight=self.k_proj.weight, + v_proj_weight=self.v_proj.weight, + in_proj_weight=None, + in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), + bias_k=None, + bias_v=None, + add_zero_attn=False, + dropout_p=0, + out_proj_weight=self.c_proj.weight, + out_proj_bias=self.c_proj.bias, + use_separate_proj_weight=True, + training=self.training, + need_weights=False + ) + + return x[0] + + +class ModifiedResNet(nn.Module): + """ + A ResNet class that is similar to torchvision's but contains the following changes: + - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool. + - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1 + - The final pooling layer is a QKV attention instead of an average pool + """ + + def __init__(self, layers, output_dim, heads, input_resolution=224, width=64): + super().__init__() + self.output_dim = output_dim + self.input_resolution = input_resolution + + # the 3-layer stem + self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False) + self.bn1 = nn.BatchNorm2d(width // 2) + self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(width // 2) + self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False) + self.bn3 = nn.BatchNorm2d(width) + self.avgpool = nn.AvgPool2d(2) + self.relu = nn.ReLU(inplace=True) + + # residual layers + self._inplanes = width # this is a *mutable* variable used during construction + self.layer1 = self._make_layer(width, layers[0]) + self.layer2 = self._make_layer(width * 2, layers[1], stride=2) + self.layer3 = self._make_layer(width * 4, layers[2], stride=2) + self.layer4 = self._make_layer(width * 8, layers[3], stride=2) + + embed_dim = width * 32 # the ResNet feature dimension + self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim) + + def _make_layer(self, planes, blocks, stride=1): + layers = [Bottleneck(self._inplanes, planes, stride)] + + self._inplanes = planes * Bottleneck.expansion + for _ in range(1, blocks): + layers.append(Bottleneck(self._inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + def stem(x): + for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]: + x = self.relu(bn(conv(x))) + x = self.avgpool(x) + return x + + x = x.type(self.conv1.weight.dtype) + x = stem(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + x = self.attnpool(x) + + return x + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor): + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class QuickGELU(nn.Module): + def forward(self, x: torch.Tensor): + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): + super().__init__() + + self.attn = MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential(OrderedDict([ + ("c_fc", nn.Linear(d_model, d_model * 4)), + ("gelu", QuickGELU()), + ("c_proj", nn.Linear(d_model 
* 4, d_model)) + ])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + self.attn_probs = None + self.attn_grad = None + + def set_attn_probs(self, attn_probs): + self.attn_probs = attn_probs + + def set_attn_grad(self, attn_grad): + self.attn_grad = attn_grad + + def attention(self, x: torch.Tensor): + self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None + return self.attn( + x, x, x, need_weights=False, attn_mask=self.attn_mask, + attention_probs_forward_hook=self.set_attn_probs, + attention_probs_backwards_hook=self.set_attn_grad + )[0] + + def forward(self, x: torch.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) + + def forward(self, x: torch.Tensor): + return self.resblocks(x) + + +class VisualTransformer(nn.Module): + def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int): + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) + + scale = width ** -0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads) + + self.ln_post = LayerNorm(width) + self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) + + def forward(self, x: torch.Tensor): + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat( + [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), + x], dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + + x = self.ln_post(x[:, 0, :]) + + if self.proj is not None: + x = x @ self.proj + + return x + + +class CLIP(nn.Module): + + def __init__(self, + embed_dim: int, + # vision + image_resolution: int, + vision_layers: Union[Tuple[int, int, int, int], int], + vision_width: int, + vision_patch_size: int, + # text + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int + ): + super().__init__() + + self.context_length = context_length + + if isinstance(vision_layers, (tuple, list)): + vision_heads = vision_width * 32 // 64 + self.visual = ModifiedResNet( + layers=vision_layers, + output_dim=embed_dim, + heads=vision_heads, + input_resolution=image_resolution, + width=vision_width + ) + else: + vision_heads = vision_width // 64 + self.visual = VisualTransformer( + input_resolution=image_resolution, + patch_size=vision_patch_size, + width=vision_width, + layers=vision_layers, + heads=vision_heads, + output_dim=embed_dim + ) + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + 
heads=transformer_heads, + attn_mask=self.build_attention_mask() + ) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.initialize_parameters() + + def initialize_parameters(self): + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + + if isinstance(self.visual, ModifiedResNet): + if self.visual.attnpool is not None: + std = self.visual.attnpool.c_proj.in_features ** -0.5 + nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std) + nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std) + + for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]: + for name, param in resnet_block.named_parameters(): + if name.endswith("bn3.weight"): + nn.init.zeros_(param) + + proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) + attn_std = self.transformer.width ** -0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) + + def build_attention_mask(self): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float("-inf")) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self): + return self.visual.conv1.weight.dtype + + def encode_image(self, image): + return self.visual(image.type(self.dtype)) + + def encode_text(self, text): + x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding.type(self.dtype) + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x).type(self.dtype) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection + + return x + + def forward(self, image, text): + image_features = self.encode_image(image) + text_features = self.encode_text(text) + + # normalized features + image_features = image_features / image_features.norm(dim=-1, keepdim=True) + text_features = text_features / text_features.norm(dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logit_scale * text_features @ image_features.t() + + # shape = [global_batch_size, global_batch_size] + return logits_per_image, logits_per_text + + +def convert_weights(model: nn.Module): + """Convert applicable model 
parameters to fp16""" + + def _convert_weights_to_fp16(l): + if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): + l.weight.data = l.weight.data.half() + if l.bias is not None: + l.bias.data = l.bias.data.half() + + if isinstance(l, MultiheadAttention): + for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: + tensor = getattr(l, attr) + if tensor is not None: + tensor.data = tensor.data.half() + + for name in ["text_projection", "proj"]: + if hasattr(l, name): + attr = getattr(l, name) + if attr is not None: + attr.data = attr.data.half() + + model.apply(_convert_weights_to_fp16) + + +def build_model(state_dict: dict): + vit = "visual.proj" in state_dict + + if vit: + vision_width = state_dict["visual.conv1.weight"].shape[0] + vision_layers = len( + [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) + vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] + grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) + image_resolution = vision_patch_size * grid_size + else: + counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in + [1, 2, 3, 4]] + vision_layers = tuple(counts) + vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] + output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) + vision_patch_size = None + assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] + image_resolution = output_width * 32 + + embed_dim = state_dict["text_projection"].shape[1] + context_length = state_dict["positional_embedding"].shape[0] + vocab_size = state_dict["token_embedding.weight"].shape[0] + transformer_width = state_dict["ln_final.weight"].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) + + model = CLIP( + embed_dim, + image_resolution, vision_layers, vision_width, vision_patch_size, + context_length, vocab_size, transformer_width, transformer_heads, transformer_layers + ) + + for key in ["input_resolution", "context_length", "vocab_size"]: + if key in state_dict: + del state_dict[key] + + convert_weights(model) + model.load_state_dict(state_dict) + return model.eval() diff --git a/pytorch_svgrender/painter/clipasso/modified_clip/simple_tokenizer.py b/pytorch_svgrender/painter/clipasso/modified_clip/simple_tokenizer.py new file mode 100644 index 0000000000000000000000000000000000000000..8e37b4e10274fc76cb7fcd106a14a5dcbbe5838a --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/modified_clip/simple_tokenizer.py @@ -0,0 +1,138 @@ +import gzip +import html +import os +from functools import lru_cache + +import ftfy +import regex as re + + +@lru_cache() +def default_bpe(): + return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a signficant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. 
+ And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + cs = bs[:] + n = 0 + for b in range(2 ** 8): + if b not in bs: + bs.append(b) + cs.append(2 ** 8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """Return set of symbol pairs in a word. + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +def basic_clean(text): + text = ftfy.fix_text(text) + text = html.unescape(html.unescape(text)) + return text.strip() + + +def whitespace_clean(text): + text = re.sub(r'\s+', ' ', text) + text = text.strip() + return text + + +class SimpleTokenizer(object): + def __init__(self, bpe_path: str = default_bpe()): + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + try: + merges = gzip.open(bpe_path).read().split('\n') + except TypeError as err: + merges = gzip.open(bpe_path).read().decode('utf-8').split('\n') + + merges = merges[1:49152 - 256 - 2 + 1] + merges = [tuple(merge.split()) for merge in merges] + vocab = list(bytes_to_unicode().values()) + vocab = vocab + [v + '</w>' for v in vocab] + for merge in merges: + vocab.append(''.join(merge)) + vocab.extend(['<|startoftext|>', '<|endoftext|>']) + self.encoder = dict(zip(vocab, range(len(vocab)))) + self.decoder = {v: k for k, v in self.encoder.items()} + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'} + self.pat = re.compile( + r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", + re.IGNORECASE) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token[:-1]) + (token[-1] + '</w>',) + pairs = get_pairs(word) + + if not pairs: + return token + '</w>' + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = ' '.join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + text = whitespace_clean(basic_clean(text)).lower() + for token in re.findall(self.pat, text): + token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) + return bpe_tokens + + def decode(self, tokens): + text = ''.join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ') + return text diff --git a/pytorch_svgrender/painter/clipasso/painter_params.py b/pytorch_svgrender/painter/clipasso/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..64dc0f11bdaa5b72ca0da5f48484c9991f7a84f1 --- 
/dev/null +++ b/pytorch_svgrender/painter/clipasso/painter_params.py @@ -0,0 +1,353 @@ +import pathlib +import random + +import omegaconf +import pydiffvg +import numpy as np +import torch +from torch.optim.lr_scheduler import LambdaLR +from torchvision import transforms + +from pytorch_svgrender.diffvg_warp import DiffVGState +from pytorch_svgrender.libs.modules.edge_map.DoG import XDoG +from .grad_cam import gradCAM +from . import modified_clip as clip + + +class Painter(DiffVGState): + + def __init__( + self, + method_cfg: omegaconf.DictConfig, + diffvg_cfg: omegaconf.DictConfig, + num_strokes: int = 4, + canvas_size: int = 224, + device=None, + target_im=None, + mask=None + ): + super(Painter, self).__init__(device, print_timing=diffvg_cfg.print_timing, + canvas_width=canvas_size, canvas_height=canvas_size) + + self.args = method_cfg + self.num_paths = num_strokes + self.num_segments = method_cfg.num_segments + self.width = method_cfg.width + self.control_points_per_seg = method_cfg.control_points_per_seg + self.opacity_optim = method_cfg.force_sparse + self.num_stages = method_cfg.num_stages + self.noise_thresh = method_cfg.noise_thresh + self.softmax_temp = method_cfg.softmax_temp + + self.color_vars_threshold = method_cfg.color_vars_threshold + + self.path_svg = method_cfg.path_svg + self.strokes_per_stage = self.num_paths + self.optimize_flag = [] + + # attention related for strokes initialisation + self.attention_init = method_cfg.attention_init + self.saliency_model = method_cfg.saliency_model + self.xdog_intersec = method_cfg.xdog_intersec + self.mask_object = method_cfg.mask_object_attention + + self.text_target = method_cfg.text_target # for clip gradients + self.saliency_clip_model = method_cfg.saliency_clip_model + self.image2clip_input = self.clip_preprocess(target_im) + self.mask = mask + self.attention_map = self.set_attention_map() if self.attention_init else None + + self.thresh = self.set_attention_threshold_map() if self.attention_init else None + self.strokes_counter = 0 # counts the number of calls to "get_path" + self.epoch = 0 + self.final_epoch = method_cfg.num_iter - 1 + + def init_image(self, stage=0): + if stage > 0: + # Noting: if multi stages training than add new strokes on existing ones + # don't optimize on previous strokes + self.optimize_flag = [False for i in range(len(self.shapes))] + for i in range(self.strokes_per_stage): + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + path = self.get_path() + self.shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + self.shape_groups.append(path_group) + self.optimize_flag.append(True) + else: + num_paths_exists = 0 + if self.path_svg is not None and pathlib.Path(self.path_svg).exists(): + print(f"-> init svg from `{self.path_svg}` ...") + + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups = self.load_svg(self.path_svg) + # if you want to add more strokes to existing ones and optimize on all of them + num_paths_exists = len(self.shapes) + + for i in range(num_paths_exists, self.num_paths): + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + path = self.get_path() + self.shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + self.shape_groups.append(path_group) + self.optimize_flag = [True for i in range(len(self.shapes))] + + img = self.render_warp() + img = img[:, :, 3:4] * img[:, :, :3] + 
torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * ( + 1 - img[:, :, 3:4]) + img = img[:, :, :3] + # Convert img from HWC to NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + + return img + + def get_image(self): + img = self.render_warp() + opacity = img[:, :, 3:4] + img = opacity * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - opacity) + img = img[:, :, :3] + # Convert img from HWC to NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_path(self): + points = [] + self.num_control_points = torch.zeros(self.num_segments, dtype=torch.int32) + (self.control_points_per_seg - 2) + p0 = self.inds_normalised[self.strokes_counter] if self.attention_init else (random.random(), random.random()) + points.append(p0) + + for j in range(self.num_segments): + radius = 0.05 + for k in range(self.control_points_per_seg - 1): + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + points.append(p1) + p0 = p1 + points = torch.tensor(points).to(self.device) + points[:, 0] *= self.canvas_width + points[:, 1] *= self.canvas_height + + path = pydiffvg.Path(num_control_points=self.num_control_points, + points=points, + stroke_width=torch.tensor(self.width), + is_closed=False) + self.strokes_counter += 1 + return path + + def render_warp(self): + if self.opacity_optim: + for group in self.shape_groups: + group.stroke_color.data[:3].clamp_(0., 0.) # to force black stroke + group.stroke_color.data[-1].clamp_(0., 1.) # opacity + # group.stroke_color.data[-1] = (group.stroke_color.data[-1] >= self.color_vars_threshold).float() + + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene( + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups + ) + img = _render(self.canvas_width, # width + self.canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + return img + + def set_point_parameters(self): + self.point_vars = [] + # storkes' location optimization + for i, path in enumerate(self.shapes): + if self.optimize_flag[i]: + path.points.requires_grad = True + self.point_vars.append(path.points) + + def get_point_parameters(self): + return self.point_vars + + def set_color_parameters(self): + # for storkes' color optimization (opacity) + self.color_vars = [] + for i, group in enumerate(self.shape_groups): + if self.optimize_flag[i]: + group.stroke_color.requires_grad = True + self.color_vars.append(group.stroke_color) + + def get_color_parameters(self): + return self.color_vars + + def save_svg(self, output_dir: str, name: str): + pydiffvg.save_svg(f'{output_dir}/{name}.svg', + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups) + + def clip_preprocess(self, target_im): + model, preprocess = clip.load(self.saliency_clip_model, device=self.device, jit=False) + model.eval().to(self.device) + data_transforms = transforms.Compose([ + preprocess.transforms[-1], + ]) + return data_transforms(target_im).to(self.device) + + def clip_attn(self): + model, preprocess = clip.load(self.saliency_clip_model, device=self.device, jit=False) + model.eval().to(self.device) + + if "RN" in self.saliency_clip_model: + text_input = clip.tokenize([self.text_target]).to(self.device) + saliency_layer = "layer4" + attn_map = gradCAM( + model.visual, + self.image2clip_input, + model.encode_text(text_input).float(), + 
getattr(model.visual, saliency_layer) + ) + attn_map = attn_map.squeeze().detach().cpu().numpy() + attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min()) + else: # ViT + attn_map = interpret(self.image2clip_input, model, self.device) + + del model + return attn_map + + def set_attention_map(self): + assert self.saliency_model in ["clip"] + if self.saliency_model == "clip": + return self.clip_attn() + + def softmax(self, x, tau=0.2): + e_x = np.exp(x / tau) + return e_x / e_x.sum() + + def set_inds_clip(self): + attn_map = (self.attention_map - self.attention_map.min()) / \ + (self.attention_map.max() - self.attention_map.min()) + if self.xdog_intersec: + xdog = XDoG(k=10) + im_xdog = xdog(self.image2clip_input[0].permute(1, 2, 0).cpu().numpy()) + intersec_map = (1 - im_xdog) * attn_map + attn_map = intersec_map + + attn_map_soft = np.copy(attn_map) + attn_map_soft[attn_map > 0] = self.softmax(attn_map[attn_map > 0], tau=self.softmax_temp) + + k = self.num_stages * self.num_paths + self.inds = np.random.choice(range(attn_map.flatten().shape[0]), size=k, replace=False, + p=attn_map_soft.flatten()) + self.inds = np.array(np.unravel_index(self.inds, attn_map.shape)).T + + self.inds_normalised = np.zeros(self.inds.shape) + self.inds_normalised[:, 0] = self.inds[:, 1] / self.canvas_width + self.inds_normalised[:, 1] = self.inds[:, 0] / self.canvas_height + self.inds_normalised = self.inds_normalised.tolist() + return attn_map_soft + + def set_attention_threshold_map(self): + assert self.saliency_model in ["clip"] + if self.saliency_model == "clip": + return self.set_inds_clip() + + def get_attn(self): + return self.attention_map + + def get_thresh(self): + return self.thresh + + def get_inds(self): + return self.inds + + def get_mask(self): + return self.mask + + +class PainterOptimizer: + + def __init__(self, renderer: Painter, num_iter: int, points_lr: float, force_sparse: bool, color_lr: float): + self.renderer = renderer + self.num_iter = num_iter + self.points_lr = points_lr + self.color_lr = color_lr + self.optim_color = force_sparse + + self.points_optimizer, self.color_optimizer = None, None + self.scheduler = None + + def init_optimizers(self): + # optimizers + self.renderer.set_point_parameters() + self.points_optimizer = torch.optim.Adam(self.renderer.get_point_parameters(), lr=self.points_lr) + if self.optim_color: + self.renderer.set_color_parameters() + self.color_optimizer = torch.optim.Adam(self.renderer.get_color_parameters(), lr=self.color_lr) + # lr schedule + lr_lambda_fn = LinearDecayLR(self.num_iter, 0.4) + self.scheduler = LambdaLR(self.points_optimizer, lr_lambda=lr_lambda_fn, last_epoch=-1) + + def update_lr(self): + self.scheduler.step() + + def zero_grad_(self): + self.points_optimizer.zero_grad() + if self.optim_color: + self.color_optimizer.zero_grad() + + def step_(self): + self.points_optimizer.step() + if self.optim_color: + self.color_optimizer.step() + + def get_lr(self): + return self.points_optimizer.param_groups[0]['lr'] + + +class LinearDecayLR: + + def __init__(self, decay_every, decay_ratio): + self.decay_every = decay_every + self.decay_ratio = decay_ratio + + def __call__(self, n): + decay_time = n // self.decay_every + decay_step = n % self.decay_every + lr_s = self.decay_ratio ** decay_time + lr_e = self.decay_ratio ** (decay_time + 1) + r = decay_step / self.decay_every + lr = lr_s * (1 - r) + lr_e * r + return lr + + +def interpret(image, clip_model, device): + # virtual forward to get attention map + images = image.repeat(1, 1, 
1, 1) + _ = clip_model.encode_image(images) # ensure `attn_probs` in attention is not empty + clip_model.zero_grad() + + image_attn_blocks = list(dict(clip_model.visual.transformer.resblocks.named_children()).values()) + # create R to store attention map + num_tokens = image_attn_blocks[0].attn_probs.shape[-1] + R = torch.eye(num_tokens, num_tokens, dtype=image_attn_blocks[0].attn_probs.dtype).to(device) + R = R.unsqueeze(0).expand(1, num_tokens, num_tokens) + + cams = [] + for i, blk in enumerate(image_attn_blocks): # 12 attention blocks + cam = blk.attn_probs.detach() # attn_probs shape: [12, 50, 50] + # each patch is 7x7 so we have 49 pixels + 1 for positional encoding + cam = cam.reshape(1, -1, cam.shape[-1], cam.shape[-1]) + cam = cam.clamp(min=0) + cam = cam.clamp(min=0).mean(dim=1) # mean of the 12 something + cams.append(cam) + R = R + torch.bmm(cam, R) + + cams_avg = torch.cat(cams) # [12, 50, 50] + cams_avg = cams_avg[:, 0, 1:] # [12, 49] + image_relevance = cams_avg.mean(dim=0).unsqueeze(0) # [1, 49] + image_relevance = image_relevance.reshape(1, 1, 7, 7) # [1, 1, 7, 7] + # interpolate: [1, 1, 7, 7] -> [1, 3, 224, 224] + image_relevance = torch.nn.functional.interpolate(image_relevance, size=224, mode='bicubic') + image_relevance = image_relevance.reshape(224, 224).data.cpu().numpy().astype(np.float32) + # normalize the tensor to [0, 1] + image_relevance = (image_relevance - image_relevance.min()) / (image_relevance.max() - image_relevance.min()) + return image_relevance diff --git a/pytorch_svgrender/painter/clipasso/sketch_utils.py b/pytorch_svgrender/painter/clipasso/sketch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8eee1e45c6002638fe4145458941e6cc6d10fd39 --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/sketch_utils.py @@ -0,0 +1,149 @@ +from PIL import Image + +import matplotlib.pyplot as plt +import numpy as np +import torch +from torchvision import transforms +from torchvision.utils import make_grid +from skimage.transform import resize + +from .u2net import U2NET + + +def plot_attn_dino(attn, threshold_map, inputs, inds, output_path): + # currently supports one image (and not a batch) + plt.figure(figsize=(10, 5)) + + plt.subplot(2, attn.shape[0] + 2, 1) + main_im = make_grid(inputs, normalize=True, pad_value=2) + main_im = np.transpose(main_im.cpu().numpy(), (1, 2, 0)) + plt.imshow(main_im, interpolation='nearest') + plt.scatter(inds[:, 1], inds[:, 0], s=10, c='red', marker='o') + plt.title("input im") + plt.axis("off") + + plt.subplot(2, attn.shape[0] + 2, 2) + plt.imshow(attn.sum(0).numpy(), interpolation='nearest') + plt.title("atn map sum") + plt.axis("off") + + plt.subplot(2, attn.shape[0] + 2, attn.shape[0] + 3) + plt.imshow(threshold_map[-1].numpy(), interpolation='nearest') + plt.title("prob sum") + plt.axis("off") + + plt.subplot(2, attn.shape[0] + 2, attn.shape[0] + 4) + plt.imshow(threshold_map[:-1].sum(0).numpy(), interpolation='nearest') + plt.title("thresh sum") + plt.axis("off") + + for i in range(attn.shape[0]): + plt.subplot(2, attn.shape[0] + 2, i + 3) + plt.imshow(attn[i].numpy()) + plt.axis("off") + plt.subplot(2, attn.shape[0] + 2, attn.shape[0] + 1 + i + 4) + plt.imshow(threshold_map[i].numpy()) + plt.axis("off") + plt.tight_layout() + plt.savefig(output_path) + plt.close() + + +def plot_attn_clip(attn, threshold_map, inputs, inds, output_path): + # currently supports one image (and not a batch) + plt.figure(figsize=(10, 5)) + + plt.subplot(1, 3, 1) + main_im = make_grid(inputs, normalize=True, pad_value=2) 
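+    # make_grid returns a CHW tensor; the lines below convert it to HWC for matplotlib
+    # and overlay the sampled stroke-initialisation points stored in `inds` (row, col order).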
+ main_im = np.transpose(main_im.cpu().numpy(), (1, 2, 0)) + plt.imshow(main_im, interpolation='nearest') + plt.scatter(inds[:, 1], inds[:, 0], s=10, c='red', marker='o') + plt.title("input im") + plt.axis("off") + + plt.subplot(1, 3, 2) + plt.imshow(attn, interpolation='nearest', vmin=0, vmax=1) + plt.title("attn map") + plt.axis("off") + + plt.subplot(1, 3, 3) + threshold_map_ = (threshold_map - threshold_map.min()) / \ + (threshold_map.max() - threshold_map.min()) + plt.imshow(threshold_map_, interpolation='nearest', vmin=0, vmax=1) + plt.title("prob softmax") + plt.scatter(inds[:, 1], inds[:, 0], s=10, c='red', marker='o') + plt.axis("off") + + plt.tight_layout() + plt.savefig(output_path) + plt.close() + + +def plot_attn(attn, threshold_map, inputs, inds, output_path, saliency_model): + if saliency_model == "dino": + plot_attn_dino(attn, threshold_map, inputs, inds, output_path) + elif saliency_model == "clip": + plot_attn_clip(attn, threshold_map, inputs, inds, output_path) + + +def fix_image_scale(im): + im_np = np.array(im) / 255 + height, width = im_np.shape[0], im_np.shape[1] + max_len = max(height, width) + 20 + new_background = np.ones((max_len, max_len, 3)) + y, x = max_len // 2 - height // 2, max_len // 2 - width // 2 + new_background[y: y + height, x: x + width] = im_np + new_background = (new_background / new_background.max() * 255).astype(np.uint8) + new_im = Image.fromarray(new_background) + return new_im + + +def get_mask_u2net(pil_im, output_dir, u2net_path, device="cpu"): + # input preprocess + w, h = pil_im.size[0], pil_im.size[1] + im_size = min(w, h) + data_transforms = transforms.Compose([ + transforms.Resize(min(320, im_size), interpolation=transforms.InterpolationMode.BICUBIC), + transforms.ToTensor(), + transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), + std=(0.26862954, 0.26130258, 0.27577711)), + ]) + input_im_trans = data_transforms(pil_im).unsqueeze(0).to(device) + + # load U^2 Net model + net = U2NET(in_ch=3, out_ch=1) + net.load_state_dict(torch.load(u2net_path)) + net.to(device) + net.eval() + + # get mask + with torch.no_grad(): + d1, d2, d3, d4, d5, d6, d7 = net(input_im_trans.detach()) + pred = d1[:, 0, :, :] + pred = (pred - pred.min()) / (pred.max() - pred.min()) + predict = pred + predict[predict < 0.5] = 0 + predict[predict >= 0.5] = 1 + mask = torch.cat([predict, predict, predict], dim=0).permute(1, 2, 0) + mask = mask.cpu().numpy() + mask = resize(mask, (h, w), anti_aliasing=False) + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + # predict_np = predict.clone().cpu().data.numpy() + im = Image.fromarray((mask[:, :, 0] * 255).astype(np.uint8)).convert('RGB') + save_path_ = output_dir / "mask.png" + im.save(save_path_) + + im_np = np.array(pil_im) + im_np = im_np / im_np.max() + im_np = mask * im_np + im_np[mask == 0] = 1 + im_final = (im_np / im_np.max() * 255).astype(np.uint8) + im_final = Image.fromarray(im_final) + + # free u2net + del net + torch.cuda.empty_cache() + + return im_final, predict diff --git a/pytorch_svgrender/painter/clipasso/u2net.py b/pytorch_svgrender/painter/clipasso/u2net.py new file mode 100644 index 0000000000000000000000000000000000000000..bcedd43ece5537921eb68a4715a076f7d4d0f7cd --- /dev/null +++ b/pytorch_svgrender/painter/clipasso/u2net.py @@ -0,0 +1,524 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class REBNCONV(nn.Module): + def __init__(self, in_ch=3, out_ch=3, dirate=1): + super(REBNCONV, self).__init__() + + self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * 
dirate, dilation=1 * dirate) + self.bn_s1 = nn.BatchNorm2d(out_ch) + self.relu_s1 = nn.ReLU(inplace=True) + + def forward(self, x): + hx = x + xout = self.relu_s1(self.bn_s1(self.conv_s1(hx))) + + return xout + + +## upsample tensor 'src' to have the same spatial size with tensor 'tar' +def _upsample_like(src, tar): + src = F.interpolate(src, size=tar.shape[2:], mode='bilinear') + + return src + + +### RSU-7 ### +class RSU7(nn.Module): # UNet07DRES(nn.Module): + + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU7, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + hx = self.pool4(hx4) + + hx5 = self.rebnconv5(hx) + hx = self.pool5(hx5) + + hx6 = self.rebnconv6(hx) + + hx7 = self.rebnconv7(hx6) + + hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1)) + hx6dup = _upsample_like(hx6d, hx5) + + hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-6 ### +class RSU6(nn.Module): # UNet06DRES(nn.Module): + + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU6, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 
2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + hx = self.pool4(hx4) + + hx5 = self.rebnconv5(hx) + + hx6 = self.rebnconv6(hx5) + + hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-5 ### +class RSU5(nn.Module): # UNet05DRES(nn.Module): + + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU5, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + hx = self.pool3(hx3) + + hx4 = self.rebnconv4(hx) + + hx5 = self.rebnconv5(hx4) + + hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-4 ### +class RSU4(nn.Module): # UNet04DRES(nn.Module): + + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU4, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1) + self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2) + + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx = self.pool1(hx1) + + hx2 = self.rebnconv2(hx) + hx = self.pool2(hx2) + + hx3 = self.rebnconv3(hx) + + hx4 = self.rebnconv4(hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = 
self.rebnconv1d(torch.cat((hx2dup, hx1), 1)) + + return hx1d + hxin + + +### RSU-4F ### +class RSU4F(nn.Module): # UNet04FRES(nn.Module): + + def __init__(self, in_ch=3, mid_ch=12, out_ch=3): + super(RSU4F, self).__init__() + + self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1) + + self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1) + self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2) + self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4) + + self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8) + + self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4) + self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2) + self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1) + + def forward(self, x): + hx = x + + hxin = self.rebnconvin(hx) + + hx1 = self.rebnconv1(hxin) + hx2 = self.rebnconv2(hx1) + hx3 = self.rebnconv3(hx2) + + hx4 = self.rebnconv4(hx3) + + hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1)) + hx2d = self.rebnconv2d(torch.cat((hx3d, hx2), 1)) + hx1d = self.rebnconv1d(torch.cat((hx2d, hx1), 1)) + + return hx1d + hxin + + +##### U^2-Net #### +class U2NET(nn.Module): + + def __init__(self, in_ch=3, out_ch=1): + super(U2NET, self).__init__() + + self.stage1 = RSU7(in_ch, 32, 64) + self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage2 = RSU6(64, 32, 128) + self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage3 = RSU5(128, 64, 256) + self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage4 = RSU4(256, 128, 512) + self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage5 = RSU4F(512, 256, 512) + self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage6 = RSU4F(512, 256, 512) + + # decoder + self.stage5d = RSU4F(1024, 256, 512) + self.stage4d = RSU4(1024, 128, 256) + self.stage3d = RSU5(512, 64, 128) + self.stage2d = RSU6(256, 32, 64) + self.stage1d = RSU7(128, 16, 64) + + self.side1 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side2 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side3 = nn.Conv2d(128, out_ch, 3, padding=1) + self.side4 = nn.Conv2d(256, out_ch, 3, padding=1) + self.side5 = nn.Conv2d(512, out_ch, 3, padding=1) + self.side6 = nn.Conv2d(512, out_ch, 3, padding=1) + + self.outconv = nn.Conv2d(6 * out_ch, out_ch, 1) + + def forward(self, x): + hx = x + + # stage 1 + hx1 = self.stage1(hx) + hx = self.pool12(hx1) + + # stage 2 + hx2 = self.stage2(hx) + hx = self.pool23(hx2) + + # stage 3 + hx3 = self.stage3(hx) + hx = self.pool34(hx3) + + # stage 4 + hx4 = self.stage4(hx) + hx = self.pool45(hx4) + + # stage 5 + hx5 = self.stage5(hx) + hx = self.pool56(hx5) + + # stage 6 + hx6 = self.stage6(hx) + hx6up = _upsample_like(hx6, hx5) + + # -------------------- decoder -------------------- + hx5d = self.stage5d(torch.cat((hx6up, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1)) + + # side output + d1 = self.side1(hx1d) + + d2 = self.side2(hx2d) + d2 = _upsample_like(d2, d1) + + d3 = self.side3(hx3d) + d3 = _upsample_like(d3, d1) + + d4 = self.side4(hx4d) + d4 = _upsample_like(d4, d1) + + d5 = self.side5(hx5d) + d5 = _upsample_like(d5, d1) + + d6 = self.side6(hx6) + d6 = _upsample_like(d6, d1) + + d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1)) + + return torch.sigmoid(d0), 
torch.sigmoid(d1), torch.sigmoid(d2), \ + torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), \ + torch.sigmoid(d6) + + +### U^2-Net small ### +class U2NETP(nn.Module): + + def __init__(self, in_ch=3, out_ch=1): + super(U2NETP, self).__init__() + + self.stage1 = RSU7(in_ch, 16, 64) + self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage2 = RSU6(64, 16, 64) + self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage3 = RSU5(64, 16, 64) + self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage4 = RSU4(64, 16, 64) + self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage5 = RSU4F(64, 16, 64) + self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.stage6 = RSU4F(64, 16, 64) + + # decoder + self.stage5d = RSU4F(128, 16, 64) + self.stage4d = RSU4(128, 16, 64) + self.stage3d = RSU5(128, 16, 64) + self.stage2d = RSU6(128, 16, 64) + self.stage1d = RSU7(128, 16, 64) + + self.side1 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side2 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side3 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side4 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side5 = nn.Conv2d(64, out_ch, 3, padding=1) + self.side6 = nn.Conv2d(64, out_ch, 3, padding=1) + + self.outconv = nn.Conv2d(6 * out_ch, out_ch, 1) + + def forward(self, x): + hx = x + + # stage 1 + hx1 = self.stage1(hx) + hx = self.pool12(hx1) + + # stage 2 + hx2 = self.stage2(hx) + hx = self.pool23(hx2) + + # stage 3 + hx3 = self.stage3(hx) + hx = self.pool34(hx3) + + # stage 4 + hx4 = self.stage4(hx) + hx = self.pool45(hx4) + + # stage 5 + hx5 = self.stage5(hx) + hx = self.pool56(hx5) + + # stage 6 + hx6 = self.stage6(hx) + hx6up = _upsample_like(hx6, hx5) + + # decoder + hx5d = self.stage5d(torch.cat((hx6up, hx5), 1)) + hx5dup = _upsample_like(hx5d, hx4) + + hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1)) + hx4dup = _upsample_like(hx4d, hx3) + + hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1)) + hx3dup = _upsample_like(hx3d, hx2) + + hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1)) + hx2dup = _upsample_like(hx2d, hx1) + + hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1)) + + # side output + d1 = self.side1(hx1d) + + d2 = self.side2(hx2d) + d2 = _upsample_like(d2, d1) + + d3 = self.side3(hx3d) + d3 = _upsample_like(d3, d1) + + d4 = self.side4(hx4d) + d4 = _upsample_like(d4, d1) + + d5 = self.side5(hx5d) + d5 = _upsample_like(d5, d1) + + d6 = self.side6(hx6) + d6 = _upsample_like(d6, d1) + + d0 = self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1)) + + return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), \ + torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), \ + torch.sigmoid(d6) diff --git a/pytorch_svgrender/painter/clipdraw/__init__.py b/pytorch_svgrender/painter/clipdraw/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..170a07990cabfa82222de00950a048cc076b0dbf --- /dev/null +++ b/pytorch_svgrender/painter/clipdraw/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: __init__.py +# Copyright (c) 2024, XiMing Xing. 
+# License: MPL-2.0 License + +from .painter_params import Painter, PainterOptimizer diff --git a/pytorch_svgrender/painter/clipdraw/painter_params.py b/pytorch_svgrender/painter/clipdraw/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..76bd0faa5c8be626ad44b3380043cd9815f8f414 --- /dev/null +++ b/pytorch_svgrender/painter/clipdraw/painter_params.py @@ -0,0 +1,189 @@ +import random +import pathlib + +import omegaconf +import pydiffvg +import torch + +from pytorch_svgrender.diffvg_warp import DiffVGState + + +class Painter(DiffVGState): + + def __init__( + self, + method_cfg: omegaconf.DictConfig, + diffvg_cfg: omegaconf.DictConfig, + num_strokes: int = 4, + canvas_size: int = 224, + device: torch.device = None, + ): + super(Painter, self).__init__(device, print_timing=diffvg_cfg.print_timing, + canvas_width=canvas_size, canvas_height=canvas_size) + self.method_cfg = method_cfg + + self.num_paths = num_strokes + self.max_width = method_cfg.max_width + self.num_stages = method_cfg.num_stages + + self.black_stroke_color = method_cfg.black_stroke_color + + self.path_svg = method_cfg.path_svg + self.strokes_per_stage = self.num_paths + self.optimize_flag = [] + + self.strokes_counter = 0 # counts the number of calls to "get_path" + + def init_image(self, stage=0): + if stage > 0: + # Noting: if multi stages training than add new strokes on existing ones + # don't optimize on previous strokes + self.optimize_flag = [False for i in range(len(self.shapes))] + for i in range(self.strokes_per_stage): + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + path = self.get_path() + self.shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + self.shape_groups.append(path_group) + self.optimize_flag.append(True) + else: + num_paths_exists = 0 + if self.path_svg is not None and pathlib.Path(self.path_svg).exists(): + print(f"-> init svg from '{self.path_svg}' ...") + + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups = self.load_svg(self.path_svg) + # if you want to add more strokes to existing ones and optimize on all of them + num_paths_exists = len(self.shapes) + + for i in range(num_paths_exists, self.num_paths): + if self.black_stroke_color: + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + else: + stroke_color = torch.tensor([random.random(), random.random(), random.random(), random.random()]) + path = self.get_path() + self.shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + self.shape_groups.append(path_group) + self.optimize_flag = [True for i in range(len(self.shapes))] + + img = self.render_warp() + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + + return img + + def get_image(self, step=0): + img = self.render_warp(step) + opacity = img[:, :, 3:4] + img = opacity * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - opacity) + img = img[:, :, :3] + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_path(self): + num_segments = random.randint(1, 3) + num_control_points = torch.zeros(num_segments, 
dtype=torch.int32) + 2 + points = [] + p0 = (random.random(), random.random()) + points.append(p0) + + for j in range(num_segments): + radius = 0.1 + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5)) + p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5)) + points.append(p1) + points.append(p2) + points.append(p3) + p0 = p3 + points = torch.tensor(points).to(self.device) + points[:, 0] *= self.canvas_width + points[:, 1] *= self.canvas_height + + path = pydiffvg.Path(num_control_points=num_control_points, + points=points, + stroke_width=torch.tensor(1.0), + is_closed=False) + self.strokes_counter += 1 + return path + + def clip_curve_shape(self): + for path in self.shapes: + path.stroke_width.data.clamp_(1.0, self.max_width) + for group in self.shape_groups: + group.stroke_color.data.clamp_(0.0, 1.0) + + def set_parameters(self): + # stroke`s location and width optimization + self.point_vars = [] + self.width_vars = [] + for i, path in enumerate(self.shapes): + if self.optimize_flag[i]: + path.points.requires_grad = True + self.point_vars.append(path.points) + path.stroke_width.requires_grad = True + self.width_vars.append(path.stroke_width) + + # for stroke' color optimization + self.color_vars = [] + for i, group in enumerate(self.shape_groups): + if self.optimize_flag[i]: + group.stroke_color.requires_grad = True + self.color_vars.append(group.stroke_color) + + return self.point_vars, self.width_vars, self.color_vars + + def learnable_parameters(self): + return self.point_vars + self.width_vars + self.color_vars + + def save_svg(self, output_dir, name): + pydiffvg.save_svg('{}/{}.svg'.format(output_dir, name), + self.canvas_width, self.canvas_height, + self.shapes, self.shape_groups) + + +class PainterOptimizer: + + def __init__(self, renderer: Painter, points_lr: float, width_lr: float, color_lr: float): + self.renderer = renderer + + self.points_lr = points_lr + self.width_lr = width_lr + self.color_lr = color_lr + + self.points_optimizer, self.width_optimizer, self.color_optimizer = None, None, None + + def init_optimizers(self): + point_vars, width_vars, color_vars = self.renderer.set_parameters() + self.points_optimizer = torch.optim.Adam(point_vars, lr=self.points_lr) + self.width_optimizer = torch.optim.Adam(width_vars, lr=self.width_lr) + self.color_optimizer = torch.optim.Adam(color_vars, lr=self.color_lr) + + def update_lr(self, step, decay_steps=(500, 750)): + if step % decay_steps[0] == 0: + for param_group in self.points_optimizer.param_groups: + param_group['lr'] = 0.4 + if step % decay_steps[1] == 0: + for param_group in self.points_optimizer.param_groups: + param_group['lr'] = 0.1 + + def zero_grad_(self): + self.points_optimizer.zero_grad() + self.width_optimizer.zero_grad() + self.color_optimizer.zero_grad() + + def step_(self): + self.points_optimizer.step() + self.width_optimizer.step() + self.color_optimizer.step() + + def get_lr(self): + return self.points_optimizer.param_groups[0]['lr'] diff --git a/pytorch_svgrender/painter/clipfont/__init__.py b/pytorch_svgrender/painter/clipfont/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..88977e8c18d25bdcb71fbebef8df9bca679a35c4 --- /dev/null +++ b/pytorch_svgrender/painter/clipfont/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: SVGDreamer - __init__.py +# Copyright (c) 2023, XiMing 
Xing. +# License: MIT License + +from .template import imagenet_templates, compose_text_with_templates +from .painter_params import Painter, PainterOptimizer \ No newline at end of file diff --git a/pytorch_svgrender/painter/clipfont/painter_params.py b/pytorch_svgrender/painter/clipfont/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..f94e6837d501ff1adbf5cfed71898b1bc44e5072 --- /dev/null +++ b/pytorch_svgrender/painter/clipfont/painter_params.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: SVG Painter and ist optimizer + +from typing import Tuple + +import omegaconf +import pydiffvg +import torch +import numpy as np + +from pytorch_svgrender.diffvg_warp import DiffVGState +from pytorch_svgrender.utils import get_rgb_from_color + + +class Painter(DiffVGState): + + def __init__(self, device=None): + super().__init__(device) + self.device = device + + self.strokes_counter = 0 # num of paths + + def init_shapes(self, path_svg, reinit_cfg: omegaconf.DictConfig = None): + print(f"-> init svg from `{path_svg}` ...") + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups = self.load_svg(path_svg) + self.strokes_counter = len(self.shapes) + + """re-init font color""" + if reinit_cfg is not None: + self.color_init(reinit_cfg) + + img = self.render_warp() + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + # Convert img from HWC to NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def color_init(self, reinit_cfg: omegaconf.DictConfig): + if not reinit_cfg.reinit: + return + + if reinit_cfg.reinit_color == 'randn': + for i, group in enumerate(self.shape_groups): + color_val = np.random.random(size=3).tolist() + [1.0] + group.fill_color = torch.FloatTensor(color_val) + elif reinit_cfg.reinit_color == 'randn_all': + color_val = np.random.random(size=3).tolist() + [1.0] + for i, group in enumerate(self.shape_groups): + group.fill_color = torch.FloatTensor(color_val) + else: + rgb = get_rgb_from_color(str(reinit_cfg.reinit_color)) + color_val = list(rgb) + [1.0] + for i, group in enumerate(self.shape_groups): + group.fill_color = torch.FloatTensor(color_val) + + def clip_curve_shape(self): + for group in self.shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + # force opacity + group.fill_color.data[-1] = 1.0 + + def get_image(self): + img = self.render_warp() + opacity = img[:, :, 3:4] + img = opacity * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - opacity) + img = img[:, :, :3] + # Convert img from HWC to NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def set_parameters(self): + self.point_vars = [] + # the strokes point optimization + for i, path in enumerate(self.shapes): + path.points.requires_grad = True + self.point_vars.append(path.points) + + # the strokes color optimization + self.color_vars = [] + for i, group in enumerate(self.shape_groups): + if group.fill_color is not None: + group.fill_color.requires_grad = True + self.color_vars.append(group.fill_color) + if group.stroke_color is not None: + group.stroke_color.requires_grad = True + self.color_vars.append(group.stroke_color) + + def get_point_parameters(self): + return self.point_vars + + def get_color_parameters(self): + return 
self.color_vars + + def pretty_save_svg(self, filename, width=None, height=None, shapes=None, shape_groups=None): + width = self.canvas_width if width is None else width + height = self.canvas_height if height is None else height + shapes = self.shapes if shapes is None else shapes + shape_groups = self.shape_groups if shape_groups is None else shape_groups + + self.save_svg(filename, width, height, shapes, shape_groups, use_gamma=False, background=None) + + def load_svg(self, path_svg): + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(path_svg) + return canvas_width, canvas_height, shapes, shape_groups + + +class PainterOptimizer: + + def __init__(self, renderer: Painter, lr_cfg: omegaconf.DictConfig): + self.renderer = renderer + self.point_lr = lr_cfg.point + self.color_lr = lr_cfg.color + self.point_optimizer, self.color_optimizer = None, None + + def init_optimizers(self): + self.renderer.set_parameters() + self.point_optimizer = torch.optim.Adam([ + {'params': self.renderer.get_point_parameters(), 'lr': self.point_lr}]) + self.color_optimizer = torch.optim.Adam([ + {'params': self.renderer.get_color_parameters(), 'lr': self.color_lr}]) + + def update_lr(self, step): + pass + + def zero_grad_(self): + self.point_optimizer.zero_grad() + self.color_optimizer.zero_grad() + + def step_(self): + self.point_optimizer.step() + self.color_optimizer.step() + + def get_lr(self) -> Tuple[float, float]: + return self.point_optimizer.param_groups[0]['lr'], self.color_optimizer.param_groups[0]['lr'] diff --git a/pytorch_svgrender/painter/clipfont/template.py b/pytorch_svgrender/painter/clipfont/template.py new file mode 100644 index 0000000000000000000000000000000000000000..e35cc26c12580282b42933e34a244187da309c5b --- /dev/null +++ b/pytorch_svgrender/painter/clipfont/template.py @@ -0,0 +1,87 @@ +from typing import List + +imagenet_templates = [ + 'a bad photo of a {}.', + 'a sculpture of a {}.', + 'a photo of the hard to see {}.', + 'a low resolution photo of the {}.', + 'a rendering of a {}.', + 'graffiti of a {}.', + 'a bad photo of the {}.', + 'a cropped photo of the {}.', + 'a tattoo of a {}.', + 'the embroidered {}.', + 'a photo of a hard to see {}.', + 'a bright photo of a {}.', + 'a photo of a clean {}.', + 'a photo of a dirty {}.', + 'a dark photo of the {}.', + 'a drawing of a {}.', + 'a photo of my {}.', + 'the plastic {}.', + 'a photo of the cool {}.', + 'a close-up photo of a {}.', + 'a black and white photo of the {}.', + 'a painting of the {}.', + 'a painting of a {}.', + 'a pixelated photo of the {}.', + 'a sculpture of the {}.', + 'a bright photo of the {}.', + 'a cropped photo of a {}.', + 'a plastic {}.', + 'a photo of the dirty {}.', + 'a jpeg corrupted photo of a {}.', + 'a blurry photo of the {}.', + 'a photo of the {}.', + 'a good photo of the {}.', + 'a rendering of the {}.', + 'a {} in a video game.', + 'a photo of one {}.', + 'a doodle of a {}.', + 'a close-up photo of the {}.', + 'a photo of a {}.', + 'the origami {}.', + 'the {} in a video game.', + 'a sketch of a {}.', + 'a doodle of the {}.', + 'a origami {}.', + 'a low resolution photo of a {}.', + 'the toy {}.', + 'a rendition of the {}.', + 'a photo of the clean {}.', + 'a photo of a large {}.', + 'a rendition of a {}.', + 'a photo of a nice {}.', + 'a photo of a weird {}.', + 'a blurry photo of a {}.', + 'a cartoon {}.', + 'art of a {}.', + 'a sketch of the {}.', + 'a embroidered {}.', + 'a pixelated photo of a {}.', + 'itap of the {}.', + 'a jpeg corrupted photo of the {}.', + 'a good 
photo of a {}.', + 'a plushie {}.', + 'a photo of the nice {}.', + 'a photo of the small {}.', + 'a photo of the weird {}.', + 'the cartoon {}.', + 'art of the {}.', + 'a drawing of the {}.', + 'a photo of the large {}.', + 'a black and white photo of a {}.', + 'the plushie {}.', + 'a dark photo of a {}.', + 'itap of a {}.', + 'graffiti of the {}.', + 'a toy {}.', + 'itap of my {}.', + 'a photo of a cool {}.', + 'a photo of a small {}.', + 'a tattoo of the {}.', +] + + +def compose_text_with_templates(text: str, templates: List[str] = imagenet_templates) -> List: + return [template.format(text) for template in templates] diff --git a/pytorch_svgrender/painter/diffsketcher/ASDS_SDXL_pipeline.py b/pytorch_svgrender/painter/diffsketcher/ASDS_SDXL_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..04c7334ea80f84035ef9025f18e2114b76709864 --- /dev/null +++ b/pytorch_svgrender/painter/diffsketcher/ASDS_SDXL_pipeline.py @@ -0,0 +1,673 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import PIL +from PIL import Image +from typing import Callable, List, Optional, Union, Tuple, AnyStr + +import numpy as np +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_bwd, custom_fwd +from torchvision import transforms +from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput +from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipeline + +from pytorch_svgrender.token2attn.attn_control import AttentionStore +from pytorch_svgrender.token2attn.ptp_utils import text_under_image, view_images + + +class Token2AttnMixinASDSSDXLPipeline(StableDiffusionXLPipeline): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
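+    Example:
+        A minimal usage sketch (the checkpoint id is illustrative, and `AttentionStore()`
+        is assumed to be constructible with its defaults):
+
+            import torch
+            from pytorch_svgrender.token2attn.attn_control import AttentionStore
+
+            pipe = Token2AttnMixinASDSSDXLPipeline.from_pretrained(
+                "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+            ).to("cuda")
+            controller = AttentionStore()
+            images = pipe("a sketch of a cat", controller=controller,
+                          num_inference_steps=50, guidance_scale=5.0).images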
+ """ + _optional_components = ["safety_checker", "feature_extractor"] + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + controller: AttentionStore = None, # feed attention_store as control of ptp + num_inference_steps: int = 50, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). 
+ negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + + Examples: + + Returns: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + self.register_attention_control(controller) # add attention controller + + # 0. 
Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, prompt_2, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + ( + text_embeddings, + negative_text_embeddings, + pooled_text_embeddings, + negative_pooled_text_embeddings, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + try: + num_channels_latents = self.unet.config.in_channels + except Exception or Warning: + num_channels_latents = self.unet.in_channels + + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + text_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. inherit TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare added time ids & embeddings + add_text_embeddings = pooled_text_embeddings + add_time_ids = self._get_add_time_ids( + original_size, crops_coords_top_left, target_size, dtype=text_embeddings.dtype + ) + + if do_classifier_free_guidance: + text_embeddings = torch.cat([negative_text_embeddings, text_embeddings], dim=0) + add_text_embeddings = torch.cat([negative_pooled_text_embeddings, add_text_embeddings], dim=0) + add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) + + text_embeddings = text_embeddings.to(device) + add_text_embeddings = add_text_embeddings.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
Denoising loop + + # 8.1 Apply denoising_end + if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeddings, "time_ids": add_time_ids} + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=text_embeddings, + added_cond_kwargs=added_cond_kwargs + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # step callback + latents = controller.step_callback(latents) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # 9. 
Post-processing + + # The decode_latents method is deprecated and has been removed in sdxl + # image = self.decode_latents(latents) + + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + return StableDiffusionXLPipelineOutput(images=image) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) + + def encode2latents(self, + image, + batch_size, + num_images_per_prompt, + dtype, + device, + generator=None): + if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): + raise ValueError( + f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" + ) + + # Offload text encoder if `enable_model_cpu_offload` was enabled + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.text_encoder_2.to("cpu") + torch.cuda.empty_cache() + + image = image.to(device=device, dtype=dtype) + + batch_size = batch_size * num_images_per_prompt + + if image.shape[1] == 4: + init_latents = image + else: + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.config.force_upcast: + image = image.float() + self.vae.to(dtype=torch.float32) + + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." + ) + + elif isinstance(generator, list): + init_latents = [ + self.vae.encode(image[i: i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) + ] + init_latents = torch.cat(init_latents, dim=0) + else: + init_latents = self.vae.encode(image).latent_dist.sample(generator) + + if self.vae.config.force_upcast: + self.vae.to(dtype) + + init_latents = init_latents.to(dtype) + init_latents = self.vae.config.scaling_factor * init_latents + + if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: + # expand init_latents for batch_size + additional_image_per_prompt = batch_size // init_latents.shape[0] + init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) + elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: + raise ValueError( + f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." + ) + else: + init_latents = torch.cat([init_latents], dim=0) + + latents = init_latents + + return latents + + @staticmethod + def S_aug(sketch: torch.Tensor, + im_res: int = 1024, + augments: str = "affine_contrast"): + # init augmentations + augment_list = [] + if "affine" in augments: + augment_list.append( + transforms.RandomPerspective(fill=0, p=1.0, distortion_scale=0.5) + ) + augment_list.append( + transforms.RandomResizedCrop(im_res, scale=(0.8, 0.8), ratio=(1.0, 1.0)) + ) + if "contrast" in augments: + # 2: increases the sharpness by a factor of 2. 
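+            # note (added comment): torchvision's RandomAdjustSharpness keeps its default p=0.5 here,
+            # so the sharpening is only applied to roughly half of the augmented sketches.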
+ augment_list.append( + transforms.RandomAdjustSharpness(sharpness_factor=2) + ) + augment_compose = transforms.Compose(augment_list) + + return augment_compose(sketch) + + def score_distillation_sampling(self, + pred_rgb: torch.Tensor, + crop_size: int, + augments: str, + prompt: Union[List, str], + prompt_2: Optional[Union[List, str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + negative_prompt: Union[List, str] = None, + negative_prompt_2: Optional[Union[List, str]] = None, + guidance_scale: float = 100, + as_latent: bool = False, + grad_scale: float = 1, + t_range: Union[List[float], Tuple[float]] = (0.05, 0.95), + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None): + + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + batch_size = 1 if isinstance(prompt, str) else len(prompt) + + num_train_timesteps = self.scheduler.config.num_train_timesteps + min_step = int(num_train_timesteps * t_range[0]) + max_step = int(num_train_timesteps * t_range[1]) + alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience + + num_images_per_prompt = 1 # the number of images to generate per prompt + + # Encode input prompt + do_classifier_free_guidance = guidance_scale > 1.0 + ( + text_embeddings, + negative_text_embeddings, + pooled_text_embeddings, + negative_pooled_text_embeddings, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=self.device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + ) + + # sketch augmentation + pred_rgb_a = self.S_aug(pred_rgb, crop_size, augments) + + # interp to 512x512 to be fed into vae. + if as_latent: + latents = F.interpolate(pred_rgb_a, (128, 128), mode='bilinear', align_corners=False) * 2 - 1 + else: + # encode image into latents via vae, requires grad! + latents = self.encode2latents( + pred_rgb_a, + batch_size, + num_images_per_prompt, + text_embeddings.dtype, + self.device + ) + + # timestep ~ U(0.05, 0.95) to avoid very high/low noise level + t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device) + + # 7. 
Prepare added time ids & embeddings + add_text_embeddings = pooled_text_embeddings + add_time_ids = self._get_add_time_ids( + original_size, crops_coords_top_left, target_size, dtype=text_embeddings.dtype + ) + + if do_classifier_free_guidance: + text_embeddings = torch.cat([negative_text_embeddings, text_embeddings], dim=0) + add_text_embeddings = torch.cat([negative_pooled_text_embeddings, add_text_embeddings], dim=0) + add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) + + text_embeddings = text_embeddings.to(self.device) + add_text_embeddings = add_text_embeddings.to(self.device) + add_time_ids = add_time_ids.to(self.device).repeat(batch_size * num_images_per_prompt, 1) + + # predict the noise residual with unet, stop gradient + with torch.no_grad(): + # add noise + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * 2) if do_classifier_free_guidance else latents_noisy + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeddings, "time_ids": add_time_ids} + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=text_embeddings, + added_cond_kwargs=added_cond_kwargs + ).sample + + # perform guidance (high scale from paper!) + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_pos - noise_pred_uncond) + + # w(t), sigma_t^2 + w = (1 - alphas[t]) + grad = grad_scale * w * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + # since we omitted an item in grad, we need to use the custom function to specify the gradient + loss = SpecifyGradient.apply(latents, grad) + + return loss, grad.mean() + + def register_attention_control(self, controller): + attn_procs = {} + cross_att_count = 0 + for name in self.unet.attn_processors.keys(): + cross_attention_dim = None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim + if name.startswith("mid_block"): + hidden_size = self.unet.config.block_out_channels[-1] + place_in_unet = "mid" + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id] + place_in_unet = "up" + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = self.unet.config.block_out_channels[block_id] + place_in_unet = "down" + else: + continue + cross_att_count += 1 + attn_procs[name] = P2PCrossAttnProcessor( + controller=controller, place_in_unet=place_in_unet + ) + + self.unet.set_attn_processor(attn_procs) + controller.num_att_layers = cross_att_count + + @staticmethod + def aggregate_attention(prompts, + attention_store: AttentionStore, + res: int, + from_where: List[str], + is_cross: bool, + select: int): + if isinstance(prompts, str): + prompts = [prompts] + assert isinstance(prompts, list) + + out = [] + attention_maps = attention_store.get_average_attention() + num_pixels = res ** 2 + for location in from_where: + for item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]: + if item.shape[1] == num_pixels: + cross_maps = item.reshape(len(prompts), -1, res, res, item.shape[-1])[select] + out.append(cross_maps) + out = torch.cat(out, dim=0) + out = out.sum(0) / out.shape[0] + return out.cpu() + + def get_cross_attention(self, + prompts, + attention_store: AttentionStore, + res: int, + from_where: List[str], + select: int = 0, + 
save_path=None): + tokens = self.tokenizer.encode(prompts[select]) + decoder = self.tokenizer.decode + # shape: [res ** 2, res ** 2, seq_len] + attention_maps = self.aggregate_attention(prompts, attention_store, res, from_where, True, select) + + images = [] + for i in range(len(tokens)): + image = attention_maps[:, :, i] + image = 255 * image / image.max() + image = image.unsqueeze(-1).expand(*image.shape, 3) + image = image.numpy().astype(np.uint8) + image = np.array(Image.fromarray(image).resize((256, 256))) + image = text_under_image(image, decoder(int(tokens[i]))) + images.append(image) + image_array = np.stack(images, axis=0) + view_images(image_array, save_image=True, fp=save_path) + + return attention_maps, tokens + + def get_self_attention_comp(self, + prompts, + attention_store: AttentionStore, + res: int, + from_where: List[str], + img_size: int = 224, + max_com=10, + select: int = 0, + save_path: AnyStr = None): + attention_maps = self.aggregate_attention(prompts, attention_store, res, from_where, False, select) + attention_maps = attention_maps.numpy().reshape((res ** 2, res ** 2)) + # shape: [res ** 2, res ** 2] + u, s, vh = np.linalg.svd(attention_maps - np.mean(attention_maps, axis=1, keepdims=True)) + print(f"self-attention_maps: {attention_maps.shape}, " + f"u: {u.shape}, " + f"s: {s.shape}, " + f"vh: {vh.shape}") + + images = [] + vh_returns = [] + for i in range(max_com): + image = vh[i].reshape(res, res) + image = (image - image.min()) / (image.max() - image.min()) + image = 255 * image + + ret_ = Image.fromarray(image).resize((img_size, img_size), resample=PIL.Image.Resampling.BILINEAR) + vh_returns.append(np.array(ret_)) + + image = np.repeat(np.expand_dims(image, axis=2), 3, axis=2).astype(np.uint8) + image = Image.fromarray(image).resize((256, 256)) + image = np.array(image) + images.append(image) + image_array = np.stack(images, axis=0) + view_images(image_array, num_rows=max_com // 10, offset_ratio=0, + save_image=True, fp=save_path / "self-attn-vh.png") + + return attention_maps, (u, s, vh), np.stack(vh_returns, axis=0) + + +class P2PCrossAttnProcessor: + + def __init__(self, controller, place_in_unet): + super().__init__() + self.controller = controller + self.place_in_unet = place_in_unet + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size=batch_size) + + query = attn.to_q(hidden_states) + + is_cross = encoder_hidden_states is not None + encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + + # one line change + self.controller(attention_probs, is_cross, self.place_in_unet) + + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class SpecifyGradient(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, input_tensor, gt_grad): + ctx.save_for_backward(gt_grad) + # we return a dummy value 1, which will be scaled by amp's scaler 
so we get the scale in backward. + return torch.ones([1], device=input_tensor.device, dtype=input_tensor.dtype) + + @staticmethod + @custom_bwd + def backward(ctx, grad_scale): + gt_grad, = ctx.saved_tensors + gt_grad = gt_grad * grad_scale + return gt_grad, None diff --git a/pytorch_svgrender/painter/diffsketcher/ASDS_pipeline.py b/pytorch_svgrender/painter/diffsketcher/ASDS_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..8ab6867990778e46fe16d0ec8c05c666e08445ff --- /dev/null +++ b/pytorch_svgrender/painter/diffsketcher/ASDS_pipeline.py @@ -0,0 +1,481 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import PIL +from PIL import Image +from typing import Callable, List, Optional, Union, Tuple, AnyStr + +import numpy as np +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_bwd, custom_fwd +from torchvision import transforms +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline + +from pytorch_svgrender.token2attn.attn_control import AttentionStore +from pytorch_svgrender.token2attn.ptp_utils import text_under_image, view_images + + +class Token2AttnMixinASDSPipeline(StableDiffusionPipeline): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
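+
+    Example:
+        An illustrative sketch, not part of the original file; the checkpoint id and the
+        default `AttentionStore()` construction are assumptions, and `sketch` stands for a
+        `[1, 3, H, W]` tensor rendered with gradients enabled:
+
+        >>> pipe = Token2AttnMixinASDSPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("cuda")
+        >>> controller = AttentionStore()
+        >>> images = pipe("a sketch of a cat", controller=controller).images
+        >>> loss, _ = pipe.score_distillation_sampling(
+        ...     sketch, crop_size=512, augments="affine_contrast", prompt="a sketch of a cat")
+        >>> loss.backward()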
+ """ + _optional_components = ["safety_checker", "feature_extractor"] + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + controller: AttentionStore = None, # feed attention_store as control of ptp + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. 
+ callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + self.register_attention_control(controller) # add attention controller + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_embeddings = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + try: + num_channels_latents = self.unet.config.in_channels + except Exception or Warning: + num_channels_latents = self.unet.in_channels + + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + text_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. inherit TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # step callback + latents = controller.step_callback(latents) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # image = self.decode_latents(latents) + + # 8. 
Post-processing + # 9. Run safety checker + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + # 10. Convert to output_type + image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + def encode_(self, images): + images = (2 * images - 1).clamp(-1.0, 1.0) # images: [B, 3, H, W] + + # encode images + latents = self.vae.encode(images).latent_dist.sample() + latents = self.vae.config.scaling_factor * latents + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + + return latents + + @staticmethod + def S_aug(sketch: torch.Tensor, + crop_size: int = 512, + augments: str = "affine_contrast"): + # init augmentations + augment_list = [] + if "affine" in augments: + augment_list.append( + transforms.RandomPerspective(fill=0, p=1.0, distortion_scale=0.5) + ) + augment_list.append( + transforms.RandomResizedCrop(crop_size, scale=(0.8, 0.8), ratio=(1.0, 1.0)) + ) + if "contrast" in augments: + # 2: increases the sharpness by a factor of 2. + augment_list.append( + transforms.RandomAdjustSharpness(sharpness_factor=2) + ) + augment_compose = transforms.Compose(augment_list) + + return augment_compose(sketch) + + def score_distillation_sampling(self, + pred_rgb: torch.Tensor, + crop_size: int, + augments: str, + prompt: Union[List, str], + negative_prompt: Union[List, str] = None, + guidance_scale: float = 100, + as_latent: bool = False, + grad_scale: float = 1, + t_range: Union[List[float], Tuple[float]] = (0.02, 0.98)): + num_train_timesteps = self.scheduler.config.num_train_timesteps + min_step = int(num_train_timesteps * t_range[0]) + max_step = int(num_train_timesteps * t_range[1]) + alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience + + # sketch augmentation + pred_rgb_a = self.S_aug(pred_rgb, crop_size, augments) + + # interp to crop_size x crop_size to be fed into vae. + if as_latent: + latents = F.interpolate(pred_rgb_a, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + else: + # encode image into latents with vae, requires grad! 
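+            # note (added comment): this encode runs outside the torch.no_grad() block used for the
+            # U-Net below, so the SDS gradient placed on `latents` can flow back through the VAE
+            # encoder into the augmented sketch `pred_rgb_a`.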
+ latents = self.encode_(pred_rgb_a) + + # Encode input prompt + num_images_per_prompt = 1 # the number of images to generate per prompt + do_classifier_free_guidance = guidance_scale > 1.0 + text_embeddings = self._encode_prompt( + prompt, self.device, num_images_per_prompt, do_classifier_free_guidance, + negative_prompt=negative_prompt, + ) + + # timestep ~ U(0.02, 0.98) to avoid very high/low noise level + t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device) + + # predict the noise residual with unet, stop gradient + with torch.no_grad(): + # add noise + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * 2) if do_classifier_free_guidance else latents_noisy + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample + + # perform guidance (high scale from paper!) + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_pos - noise_pred_uncond) + + # w(t), sigma_t^2 + w = (1 - alphas[t]) + grad = grad_scale * w * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + # since we omitted an item in grad, we need to use the custom function to specify the gradient + loss = SpecifyGradient.apply(latents, grad) + + return loss, grad.mean() + + def register_attention_control(self, controller): + attn_procs = {} + cross_att_count = 0 + for name in self.unet.attn_processors.keys(): + cross_attention_dim = None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim + if name.startswith("mid_block"): + hidden_size = self.unet.config.block_out_channels[-1] + place_in_unet = "mid" + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id] + place_in_unet = "up" + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = self.unet.config.block_out_channels[block_id] + place_in_unet = "down" + else: + continue + cross_att_count += 1 + attn_procs[name] = P2PCrossAttnProcessor( + controller=controller, place_in_unet=place_in_unet + ) + + self.unet.set_attn_processor(attn_procs) + controller.num_att_layers = cross_att_count + + @staticmethod + def aggregate_attention(prompts, + attention_store: AttentionStore, + res: int, + from_where: List[str], + is_cross: bool, + select: int): + if isinstance(prompts, str): + prompts = [prompts] + assert isinstance(prompts, list) + + out = [] + attention_maps = attention_store.get_average_attention() + num_pixels = res ** 2 + for location in from_where: + for item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]: + if item.shape[1] == num_pixels: + cross_maps = item.reshape(len(prompts), -1, res, res, item.shape[-1])[select] + out.append(cross_maps) + out = torch.cat(out, dim=0) + out = out.sum(0) / out.shape[0] + return out.cpu() + + def get_cross_attention(self, + prompts, + attention_store: AttentionStore, + res: int, + from_where: List[str], + select: int = 0, + save_path=None): + tokens = self.tokenizer.encode(prompts[select]) + decoder = self.tokenizer.decode + # shape: [res ** 2, res ** 2, seq_len] + attention_maps = self.aggregate_attention(prompts, attention_store, res, from_where, True, select) + + images = [] + for i in range(len(tokens)): + image = attention_maps[:, :, i] + image = 255 * image / image.max() + 
image = image.unsqueeze(-1).expand(*image.shape, 3) + image = image.numpy().astype(np.uint8) + image = np.array(Image.fromarray(image).resize((256, 256))) + image = text_under_image(image, decoder(int(tokens[i]))) + images.append(image) + image_array = np.stack(images, axis=0) + view_images(image_array, save_image=True, fp=save_path) + + return attention_maps, tokens + + def get_self_attention_comp(self, + prompts, + attention_store: AttentionStore, + res: int, + from_where: List[str], + img_size: int = 224, + max_com=10, + select: int = 0, + save_path: AnyStr = None): + attention_maps = self.aggregate_attention(prompts, attention_store, res, from_where, False, select) + attention_maps = attention_maps.numpy().reshape((res ** 2, res ** 2)) + # shape: [res ** 2, res ** 2] + u, s, vh = np.linalg.svd(attention_maps - np.mean(attention_maps, axis=1, keepdims=True)) + print(f"self-attention_maps: {attention_maps.shape}, " + f"u: {u.shape}, " + f"s: {s.shape}, " + f"vh: {vh.shape}") + + images = [] + vh_returns = [] + for i in range(max_com): + image = vh[i].reshape(res, res) + image = (image - image.min()) / (image.max() - image.min()) + image = 255 * image + + ret_ = Image.fromarray(image).resize((img_size, img_size), resample=PIL.Image.Resampling.BILINEAR) + vh_returns.append(np.array(ret_)) + + image = np.repeat(np.expand_dims(image, axis=2), 3, axis=2).astype(np.uint8) + image = Image.fromarray(image).resize((256, 256)) + image = np.array(image) + images.append(image) + image_array = np.stack(images, axis=0) + view_images(image_array, num_rows=max_com // 10, offset_ratio=0, + save_image=True, fp=save_path / "self-attn-vh.png") + + return attention_maps, (u, s, vh), np.stack(vh_returns, axis=0) + + +class P2PCrossAttnProcessor: + + def __init__(self, controller, place_in_unet): + super().__init__() + self.controller = controller + self.place_in_unet = place_in_unet + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size=batch_size) + + query = attn.to_q(hidden_states) + + is_cross = encoder_hidden_states is not None + encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + + # one line change + self.controller(attention_probs, is_cross, self.place_in_unet) + + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + return hidden_states + + +class SpecifyGradient(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, input_tensor, gt_grad): + ctx.save_for_backward(gt_grad) + # we return a dummy value 1, which will be scaled by amp's scaler so we get the scale in backward. 
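+        # in backward(), the saved gt_grad (the precomputed SDS gradient) is multiplied by that scale
+        # and returned as d(loss)/d(input_tensor), so calling loss.backward() effectively writes
+        # grad_scale * w(t) * (noise_pred - noise) into the gradient of the latents.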
+ return torch.ones([1], device=input_tensor.device, dtype=input_tensor.dtype) + + @staticmethod + @custom_bwd + def backward(ctx, grad_scale): + gt_grad, = ctx.saved_tensors + gt_grad = gt_grad * grad_scale + return gt_grad, None diff --git a/pytorch_svgrender/painter/diffsketcher/__init__.py b/pytorch_svgrender/painter/diffsketcher/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..301150cd7deeb2b097b4959b99f240a39a3632d9 --- /dev/null +++ b/pytorch_svgrender/painter/diffsketcher/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .painter_params import Painter, SketchPainterOptimizer +from .ASDS_pipeline import Token2AttnMixinASDSPipeline +from .ASDS_SDXL_pipeline import Token2AttnMixinASDSSDXLPipeline + +__all__ = [ + 'Painter', 'SketchPainterOptimizer', + 'Token2AttnMixinASDSPipeline', + 'Token2AttnMixinASDSSDXLPipeline' +] diff --git a/pytorch_svgrender/painter/diffsketcher/painter_params.py b/pytorch_svgrender/painter/diffsketcher/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..97e5743ee5bb9dfc870c205c74720c4b6431ee77 --- /dev/null +++ b/pytorch_svgrender/painter/diffsketcher/painter_params.py @@ -0,0 +1,302 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: +import random +import pathlib + +import omegaconf +import pydiffvg +import numpy as np +import torch + +from pytorch_svgrender.libs.modules.edge_map.DoG import XDoG +from pytorch_svgrender.diffvg_warp import DiffVGState + + +class Painter(DiffVGState): + + def __init__( + self, + cfg: omegaconf.DictConfig, + diffvg_cfg: omegaconf.DictConfig, + num_strokes: int = 4, + num_segments: int = 4, + canvas_size: int = 224, + device: torch.device = None, + target_im: torch.Tensor = None, + attention_map: torch.Tensor = None, + mask: torch.Tensor = None, + ): + super(Painter, self).__init__(device, print_timing=diffvg_cfg.print_timing, + canvas_width=canvas_size, canvas_height=canvas_size) + + self.num_paths = num_strokes + self.num_segments = num_segments + self.width = cfg.width + self.max_width = cfg.max_width + self.optim_width = cfg.optim_width + self.control_points_per_seg = cfg.control_points_per_seg + self.optim_rgba = cfg.optim_rgba + self.optim_alpha = cfg.optim_opacity + self.num_stages = cfg.num_stages + self.softmax_temp = cfg.softmax_temp + + self.shapes = [] + self.shape_groups = [] + self.num_control_points = 0 + self.color_vars_threshold = cfg.color_vars_threshold + + self.path_svg = cfg.path_svg + self.strokes_per_stage = self.num_paths + self.optimize_flag = [] + + # attention related for strokes initialisation + self.attention_init = cfg.attention_init + self.xdog_intersec = cfg.xdog_intersec + + self.GT_input = target_im + self.mask = mask + self.attention_map = attention_map if self.attention_init else None + + self.thresh = self.set_attention_threshold_map() if self.attention_init else None + self.strokes_counter = 0 # counts the number of calls to "get_path" + + def init_image(self, stage=0): + if stage > 0: + # Noting: if multi stages training than add new strokes on existing ones + # don't optimize on previous strokes + self.optimize_flag = [False for i in range(len(self.shapes))] + for i in range(self.strokes_per_stage): + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + path = self.get_path() + self.shapes.append(path) + path_group = 
pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + self.shape_groups.append(path_group) + self.optimize_flag.append(True) + else: + num_paths_exists = 0 + if self.path_svg is not None and pathlib.Path(self.path_svg).exists(): + print(f"-> init svg from `{self.path_svg}` ...") + + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups = self.load_svg(self.path_svg) + # if you want to add more strokes to existing ones and optimize on all of them + num_paths_exists = len(self.shapes) + + for i in range(num_paths_exists, self.num_paths): + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + path = self.get_path() + self.shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + self.shape_groups.append(path_group) + self.optimize_flag = [True for i in range(len(self.shapes))] + + img = self.render_warp() + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + + return img + + def get_image(self): + img = self.render_warp() + + opacity = img[:, :, 3:4] + img = opacity * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - opacity) + img = img[:, :, :3] + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_path(self): + self.num_control_points = torch.zeros(self.num_segments, dtype=torch.int32) + (self.control_points_per_seg - 2) + points = [] + p0 = self.inds_normalised[self.strokes_counter] if self.attention_init else (random.random(), random.random()) + points.append(p0) + + for j in range(self.num_segments): + radius = 0.05 + for k in range(self.control_points_per_seg - 1): + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + points.append(p1) + p0 = p1 + points = torch.tensor(points).to(self.device) + points[:, 0] *= self.canvas_width + points[:, 1] *= self.canvas_height + + path = pydiffvg.Path(num_control_points=self.num_control_points, + points=points, + stroke_width=torch.tensor(self.width), + is_closed=False) + self.strokes_counter += 1 + return path + + def clip_curve_shape(self): + if self.optim_width: + for path in self.shapes: + path.stroke_width.data.clamp_(1.0, self.max_width) + if self.optim_rgba: + for group in self.shape_groups: + group.stroke_color.data.clamp_(0.0, 1.0) + else: + if self.optim_alpha: + for group in self.shape_groups: + # group.stroke_color.data: RGBA + group.stroke_color.data[:3].clamp_(0., 0.) # to force black stroke + group.stroke_color.data[-1].clamp_(0., 1.) 
# opacity + + def path_pruning(self): + for group in self.shape_groups: + group.stroke_color.data[-1] = (group.stroke_color.data[-1] >= self.color_vars_threshold).float() + + def set_points_parameters(self): + # stoke`s location optimization + self.point_vars = [] + for i, path in enumerate(self.shapes): + if self.optimize_flag[i]: + path.points.requires_grad = True + self.point_vars.append(path.points) + + def get_points_params(self): + return self.point_vars + + def set_width_parameters(self): + # stroke`s width optimization + self.width_vars = [] + for i, path in enumerate(self.shapes): + if self.optimize_flag[i]: + path.stroke_width.requires_grad = True + self.width_vars.append(path.stroke_width) + + def get_width_parameters(self): + return self.width_vars + + def set_color_parameters(self): + # for strokes color optimization (opacity) + self.color_vars = [] + for i, group in enumerate(self.shape_groups): + if self.optimize_flag[i]: + group.stroke_color.requires_grad = True + self.color_vars.append(group.stroke_color) + + def get_color_parameters(self): + return self.color_vars + + def save_svg(self, output_dir, fname): + pydiffvg.save_svg(f'{output_dir}/{fname}.svg', + self.canvas_width, + self.canvas_height, + self.shapes, + self.shape_groups) + + @staticmethod + def softmax(x, tau=0.2): + e_x = np.exp(x / tau) + return e_x / e_x.sum() + + def set_inds_ldm(self): + attn_map = (self.attention_map - self.attention_map.min()) / \ + (self.attention_map.max() - self.attention_map.min()) + + if self.xdog_intersec: + xdog = XDoG(k=10) + im_xdog = xdog(self.GT_input[0].permute(1, 2, 0).cpu().numpy()) + print(f"use XDoG, shape: {im_xdog.shape}") + intersec_map = (1 - im_xdog) * attn_map + attn_map = intersec_map + + attn_map_soft = np.copy(attn_map) + attn_map_soft[attn_map > 0] = self.softmax(attn_map[attn_map > 0], tau=self.softmax_temp) + + # select points + k = self.num_stages * self.num_paths + self.inds = np.random.choice(range(attn_map.flatten().shape[0]), + size=k, + replace=False, + p=attn_map_soft.flatten()) + self.inds = np.array(np.unravel_index(self.inds, attn_map.shape)).T + + self.inds_normalised = np.zeros(self.inds.shape) + self.inds_normalised[:, 0] = self.inds[:, 1] / self.canvas_width + self.inds_normalised[:, 1] = self.inds[:, 0] / self.canvas_height + self.inds_normalised = self.inds_normalised.tolist() + return attn_map_soft + + def set_attention_threshold_map(self): + return self.set_inds_ldm() + + def get_attn(self): + return self.attention_map + + def get_thresh(self): + return self.thresh + + def get_inds(self): + return self.inds + + def get_mask(self): + return self.mask + + +class SketchPainterOptimizer: + + def __init__( + self, + renderer: Painter, + points_lr: float, + optim_alpha: bool, + optim_rgba: bool, + color_lr: float, + optim_width: bool, + width_lr: float + ): + self.renderer = renderer + + self.points_lr = points_lr + self.optim_color = optim_alpha or optim_rgba + self.color_lr = color_lr + self.optim_width = optim_width + self.width_lr = width_lr + + self.points_optimizer, self.width_optimizer, self.color_optimizer = None, None, None + + def init_optimizers(self): + self.renderer.set_points_parameters() + self.points_optimizer = torch.optim.Adam(self.renderer.get_points_params(), lr=self.points_lr) + if self.optim_color: + self.renderer.set_color_parameters() + self.color_optimizer = torch.optim.Adam(self.renderer.get_color_parameters(), lr=self.color_lr) + if self.optim_width: + self.renderer.set_width_parameters() + self.width_optimizer = 
torch.optim.Adam(self.renderer.get_width_parameters(), lr=self.width_lr) + + def update_lr(self, step, decay_steps=(500, 750)): + if step % decay_steps[0] == 0 and step > 0: + for param_group in self.points_optimizer.param_groups: + param_group['lr'] = 0.4 + if step % decay_steps[1] == 0 and step > 0: + for param_group in self.points_optimizer.param_groups: + param_group['lr'] = 0.1 + + def zero_grad_(self): + self.points_optimizer.zero_grad() + if self.optim_color: + self.color_optimizer.zero_grad() + if self.optim_width: + self.width_optimizer.zero_grad() + + def step_(self): + self.points_optimizer.step() + if self.optim_color: + self.color_optimizer.step() + if self.optim_width: + self.width_optimizer.step() + + def get_lr(self): + return self.points_optimizer.param_groups[0]['lr'] diff --git a/pytorch_svgrender/painter/diffsketcher/sketch_utils.py b/pytorch_svgrender/painter/diffsketcher/sketch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..078a804de9c12cd94ab6613895c5e2ae0577c342 --- /dev/null +++ b/pytorch_svgrender/painter/diffsketcher/sketch_utils.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import matplotlib.pyplot as plt +import numpy as np +import torch +from torchvision.utils import make_grid + + +def plt_triplet( + photos: torch.Tensor, + sketch: torch.Tensor, + style: torch.Tensor, + step: int, + prompt: str, + output_dir: str, + fname: str, # file name + dpi: int = 300 +): + if photos.shape != sketch.shape: + raise ValueError("photos and sketch must have the same dimensions") + + plt.figure() + plt.subplot(1, 3, 1) # nrows=1, ncols=3, index=1 + grid = make_grid(photos, normalize=True, pad_value=2) + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy() + plt.imshow(ndarr) + plt.axis("off") + plt.title("Generated sample") + + plt.subplot(1, 3, 2) # nrows=1, ncols=3, index=2 + # style = (style + 1) / 2 + grid = make_grid(style, normalize=False, pad_value=2) + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy() + plt.imshow(ndarr) + plt.axis("off") + plt.title(f"Style") + + plt.subplot(1, 3, 3) # nrows=1, ncols=3, index=2 + # sketch = (sketch + 1) / 2 + grid = make_grid(sketch, normalize=False, pad_value=2) + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy() + plt.imshow(ndarr) + plt.axis("off") + plt.title(f"Rendering result - {step} steps") + + def insert_newline(string, point=9): + # split by blank + words = string.split() + if len(words) <= point: + return string + + word_chunks = [words[i:i + point] for i in range(0, len(words), point)] + new_string = "\n".join(" ".join(chunk) for chunk in word_chunks) + return new_string + + plt.suptitle(insert_newline(prompt), fontsize=10) + + plt.tight_layout() + plt.savefig(f"{output_dir}/{fname}.png", dpi=dpi) + plt.close() + + +def plt_attn(attn: np.array, + threshold_map: np.array, + inputs: torch.Tensor, + inds: np.array, + output_path: str): + # currently supports one image (and not a batch) + plt.figure(figsize=(10, 5)) + + plt.subplot(1, 3, 1) + main_im = make_grid(inputs, normalize=True, pad_value=2) + main_im = np.transpose(main_im.cpu().numpy(), (1, 2, 0)) + plt.imshow(main_im, interpolation='nearest') + plt.scatter(inds[:, 1], inds[:, 0], s=10, c='red', marker='o') + plt.title("input img") + plt.axis("off") + + plt.subplot(1, 3, 2) + plt.imshow(attn, interpolation='nearest', 
vmin=0, vmax=1) + plt.title("attn map") + plt.axis("off") + + plt.subplot(1, 3, 3) + threshold_map_ = (threshold_map - threshold_map.min()) / \ + (threshold_map.max() - threshold_map.min()) + plt.imshow(np.nan_to_num(threshold_map_), interpolation='nearest', vmin=0, vmax=1) + plt.title("prob softmax") + plt.scatter(inds[:, 1], inds[:, 0], s=10, c='red', marker='o') + plt.axis("off") + + plt.tight_layout() + plt.savefig(output_path) + plt.close() diff --git a/pytorch_svgrender/painter/diffsketcher/stroke_pruning.py b/pytorch_svgrender/painter/diffsketcher/stroke_pruning.py new file mode 100644 index 0000000000000000000000000000000000000000..15a1956aa90229639c42a649179b1e2d81439164 --- /dev/null +++ b/pytorch_svgrender/painter/diffsketcher/stroke_pruning.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import xml.etree.ElementTree as ET +import statistics + +import argparse + + +def paths_pruning(svg_file_path, output_file_path, opacity_delta=0.2): + try: + # Parse the SVG file + tree = ET.parse(svg_file_path) + namespace = "http://www.w3.org/2000/svg" + ET.register_namespace("", namespace) + + root = tree.getroot() + root.set('version', '1.1') + + paths = root.findall('.//{http://www.w3.org/2000/svg}path') + # Collect stroke-opacity attribute values + opacity_values = [] + for path in paths: + opacity = path.get("stroke-opacity") + if opacity is not None: + opacity_values.append(float(opacity)) + + # Calculate median opacity + median_opacity = statistics.median(opacity_values) + opacity_delta + + # Create a temporary list to store paths to be removed + paths_to_remove = [] + for path in paths: + opacity = path.get('stroke-opacity') + if opacity is not None and float(opacity) < median_opacity: + paths_to_remove.append(path) + + # Remove paths from the root element + for path in paths_to_remove: + path.set('stroke-opacity', '0') + + print(f"n_path: {len(paths)}, " + f"opacity_thresh: {median_opacity}, " + f"n_path_to_remove: {len(set(paths_to_remove))}.") + + # Save the modified SVG to the specified path + tree.write(output_file_path, encoding='utf-8', xml_declaration=True, default_namespace="") + # print("SVG file saved successfully.") + # print(f"file has been saved in: {output_file_path}") + except Exception as e: + print(f"An error occurred: {str(e)}") + + +if __name__ == '__main__': + """ + python process_svg.py -save ./xx.svg -tar ./xx.svg + """ + parser = argparse.ArgumentParser() + parser.add_argument("-tar", "--target_file", + default="", type=str, + help="the path of SVG file place.") + parser.add_argument("-save", "--save_path", + default="", type=str, + help="the path of processed SVG file place.") + parser.add_argument("-od", "--opacity_delta", + default=0.1, type=float) + args = parser.parse_args() + + paths_pruning(args.target_file, args.save_path, float(args.opacity_delta)) diff --git a/pytorch_svgrender/painter/diffvg/__init__.py b/pytorch_svgrender/painter/diffvg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..170a07990cabfa82222de00950a048cc076b0dbf --- /dev/null +++ b/pytorch_svgrender/painter/diffvg/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: __init__.py +# Copyright (c) 2024, XiMing Xing. 
+# License: MPL-2.0 License + +from .painter_params import Painter, PainterOptimizer diff --git a/pytorch_svgrender/painter/diffvg/painter_params.py b/pytorch_svgrender/painter/diffvg/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..2126a6bc68924dadc0f1eb1e6df62812edd6bc95 --- /dev/null +++ b/pytorch_svgrender/painter/diffvg/painter_params.py @@ -0,0 +1,242 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: DiffVG painter and optimizer +# Copyright (c) 2023, XiMing Xing. +# License: MPL-2.0 License + +import copy +import random +from typing import List + +import omegaconf +import numpy as np +import pydiffvg +import torch +from torch.optim.lr_scheduler import LambdaLR + +from pytorch_svgrender.diffvg_warp import DiffVGState + + +class Painter(DiffVGState): + + def __init__( + self, + target_img: torch.Tensor, + diffvg_cfg: omegaconf.DictConfig, + canvas_size: List, + path_type: str = 'unclosed', + max_width: float = 3.0, + device: torch.device = None, + ): + super(Painter, self).__init__(device, print_timing=diffvg_cfg.print_timing, + canvas_width=canvas_size[0], canvas_height=canvas_size[1]) + + self.target_img = target_img + self.path_type: str = path_type + self.max_width = max_width + self.train_stroke: bool = path_type == 'unclosed' + + self.strokes_counter: int = 0 # counts the number of calls to "get_path" + + def init_image(self, num_paths=0): + for i in range(num_paths): + path = self.get_path() + self.shapes.append(path) + self.shapes.append(path) + + fill_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + stroke_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None if self.train_stroke else fill_color_init, + stroke_color=stroke_color_init if self.train_stroke else None + ) + self.shape_groups.append(path_group) + self.shape_groups.append(path_group) + + img = self.render_warp() + img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) \ + * (1 - img[:, :, 3:4]) + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_image(self, step: int = 0): + img = self.render_warp(seed=step) + img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) \ + * (1 - img[:, :, 3:4]) + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_path(self): + if self.path_type == 'unclosed': + num_segments = random.randint(1, 3) + num_control_points = torch.zeros(num_segments, dtype=torch.int32) + 2 + points = [] + p0 = (random.random(), random.random()) + points.append(p0) + for j in range(num_segments): + radius = 0.05 + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5)) + p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5)) + points.append(p1) + points.append(p2) + points.append(p3) + p0 = p3 + points = torch.tensor(points) + points[:, 0] *= self.canvas_width + points[:, 1] *= self.canvas_height + # points = torch.rand(3 * num_segments + 1, 2) * min(canvas_width, canvas_height) + + path = pydiffvg.Path(num_control_points=num_control_points, + points=points, + stroke_width=torch.tensor(1.0), + is_closed=False) + 
elif self.path_type == 'closed': + num_segments = random.randint(3, 5) + num_control_points = torch.zeros(num_segments, dtype=torch.int32) + 2 + points = [] + p0 = (random.random(), random.random()) + points.append(p0) + for j in range(num_segments): + radius = 0.05 + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5)) + p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5)) + points.append(p1) + points.append(p2) + if j < num_segments - 1: + points.append(p3) + p0 = p3 + points = torch.tensor(points) + points[:, 0] *= self.canvas_width + points[:, 1] *= self.canvas_height + path = pydiffvg.Path(num_control_points=num_control_points, + points=points, + stroke_width=torch.tensor(1.0), + is_closed=True) + + self.strokes_counter += 1 + return path + + def clip_curve_shape(self): + if self.train_stroke: # open-form path + for path in self.shapes: + path.stroke_width.data.clamp_(1.0, self.max_width) + for group in self.shape_groups: + group.stroke_color.data.clamp_(0.0, 1.0) + else: # closed-form path + for group in self.shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + + def set_parameters(self): + # stroke`s location optimization + self.point_vars = [] + for i, path in enumerate(self.shapes): + path.points.requires_grad = True + self.point_vars.append(path.points) + + if self.train_stroke: + path.stroke_width.requires_grad = True + self.width_vars.append(path.stroke_width) + + # for stroke' color optimization + self.color_vars = [] + for i, group in enumerate(self.shape_groups): + if self.train_stroke: + group.stroke_color.requires_grad = True + self.color_vars.append(group.stroke_color) + else: + group.fill_color.requires_grad = True + self.color_vars.append(group.fill_color) + + def get_point_parameters(self): + return self.point_vars + + def get_color_parameters(self): + return self.color_vars + + def get_stroke_parameters(self): + return self.width_vars, self.get_color_parameters() + + def save_svg(self, fpath): + pydiffvg.save_svg(f'{fpath}', self.canvas_width, self.canvas_height, self.shapes, self.shape_groups) + + +class LinearDecayLR: + + def __init__(self, decay_every, decay_ratio): + self.decay_every = decay_every + self.decay_ratio = decay_ratio + + def __call__(self, n): + decay_time = n // self.decay_every + decay_step = n % self.decay_every + lr_s = self.decay_ratio ** decay_time + lr_e = self.decay_ratio ** (decay_time + 1) + r = decay_step / self.decay_every + lr = lr_s * (1 - r) + lr_e * r + return lr + + +class PainterOptimizer: + + def __init__(self, + renderer: Painter, + num_iter: int, + lr_config: omegaconf.DictConfig, + trainable_stroke: bool = False): + self.renderer = renderer + self.num_iter = num_iter + self.trainable_stroke = trainable_stroke + + self.lr_base = { + 'point': lr_config.point, + 'color': lr_config.color, + 'stroke_width': lr_config.stroke_width, + 'stroke_color': lr_config.stroke_color, + } + + self.learnable_params = [] # list[Dict] + + self.optimizer = None + self.scheduler = None + + def init_optimizer(self): + # optimizers + params = {} + self.renderer.set_parameters() + params['point'] = self.renderer.get_point_parameters() + + if self.trainable_stroke: + params['stroke_width'], params['stroke_color'] = self.renderer.get_stroke_parameters() + else: + params['color'] = self.renderer.get_color_parameters() + + self.learnable_params = [ + {'params': params[ki], 'lr': 
self.lr_base[ki]} for ki in sorted(params.keys()) + ] + self.optimizer = torch.optim.Adam(self.learnable_params) + + # lr schedule + lr_lambda_fn = LinearDecayLR(self.num_iter, 0.4) + self.scheduler = LambdaLR(self.optimizer, lr_lambda=lr_lambda_fn, last_epoch=-1) + + def update_params(self, name: str, value: torch.tensor): + for param_group in self.learnable_params: + if param_group.get('_id') == name: + param_group['params'] = value + + def update_lr(self): + self.scheduler.step() + + def zero_grad_(self): + self.optimizer.zero_grad() + + def step_(self): + self.optimizer.step() + + def get_lr(self): + return self.optimizer.param_groups[0]['lr'] diff --git a/pytorch_svgrender/painter/live/__init__.py b/pytorch_svgrender/painter/live/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7568b4af7d075c86c3338e097b0490292f7c9b83 --- /dev/null +++ b/pytorch_svgrender/painter/live/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .painter_params import Painter, PainterOptimizer +from .xing_loss import xing_loss_fn diff --git a/pytorch_svgrender/painter/live/painter_params.py b/pytorch_svgrender/painter/live/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..b9b874de561cbfd938aaa4ecfa7de7d63585b7e0 --- /dev/null +++ b/pytorch_svgrender/painter/live/painter_params.py @@ -0,0 +1,442 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: LIVE painter and optimizer +# Copyright (c) 2023, XiMing Xing. +# License: MIT License + +import copy +import random + +import omegaconf +from omegaconf import DictConfig + +import cv2 +import numpy as np +import pydiffvg +import torch +from torch.optim.lr_scheduler import LambdaLR + +from pytorch_svgrender.diffvg_warp import DiffVGState + + +class Painter(DiffVGState): + + def __init__( + self, + target_img: torch.Tensor, + diffvg_cfg: omegaconf.DictConfig, + num_segments: int = 4, + segment_init: str = 'random', + radius: int = 5, + canvas_size=240, + trainable_bg: bool = False, + stroke: bool = False, + stroke_width: int = 3, + device: torch.device = None, + ): + super(Painter, self).__init__(device, print_timing=diffvg_cfg.print_timing, + canvas_width=canvas_size, canvas_height=canvas_size) + + self.target_img = target_img + + self.num_segments = num_segments + self.segment_init = segment_init + self.radius = radius + self.train_stroke = stroke + self.stroke_width = stroke_width + + self.points_vars = [] + self.stroke_width_vars = [] + self.stroke_color_vars = [] + self.color_vars = [] + + self.strokes_counter = 0 # counts the number of calls to "get_path" + + # Background + self.para_bg = torch.tensor([1., 1., 1.], requires_grad=trainable_bg, device=self.device) + + self.pos_init_method = None + + def component_wise_path_init(self, pred, init_type: str = 'sparse'): + assert self.target_img is not None # gt + + if init_type == 'random': + self.pos_init_method = RandomCoordInit(self.canvas_height, self.canvas_width) + elif init_type == 'sparse': + # when initialized for the first time, the render result is None + if pred is None: + pred = self.para_bg.view(1, -1, 1, 1).repeat(1, 1, self.canvas_height, self.canvas_width) + # then pred is the render result + self.pos_init_method = SparseCoordInit(pred, self.target_img) + elif init_type == 'naive': + if pred is None: + pred = self.para_bg.view(1, -1, 1, 1).repeat(1, 1, self.canvas_height, self.canvas_width) + self.pos_init_method = 
NaiveCoordInit(pred, self.target_img) + else: + raise NotImplementedError(f"'{init_type}' is not support.") + + def init_image(self, num_paths=0): + self.cur_shapes, self.cur_shape_groups = [], [] + + for i in range(num_paths): + path, color_ref = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + wref, href = color_ref + wref = max(0, min(int(wref), self.canvas_width - 1)) + href = max(0, min(int(href), self.canvas_height - 1)) + fill_color_init = list(self.target_img[0, :, href, wref]) + [1.] + fill_color_init = torch.FloatTensor(fill_color_init) + stroke_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None if self.train_stroke else fill_color_init, + stroke_color=stroke_color_init if self.train_stroke else None + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + img = self.render_warp() + img = img[:, :, 3:4] * img[:, :, :3] + self.para_bg * (1 - img[:, :, 3:4]) + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_image(self, step: int = 0): + img = self.render_warp(seed=step) + img = img[:, :, 3:4] * img[:, :, :3] + self.para_bg * (1 - img[:, :, 3:4]) + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_path(self): + num_segments = self.num_segments + num_control_points = [2] * num_segments + + points = [] + # init segment + if self.segment_init == 'circle': + radius = self.radius if self.radius is not None else np.random.uniform(0.5, 1) + if self.pos_init_method is not None: + center = self.pos_init_method() + else: + center = (random.random(), random.random()) + bias = center + color_ref = copy.deepcopy(bias) + + avg_degree = 360 / (num_segments * 3) + for i in range(0, num_segments * 3): + point = ( + np.cos(np.deg2rad(i * avg_degree)), np.sin(np.deg2rad(i * avg_degree)) + ) + points.append(point) + + points = torch.FloatTensor(points) * radius + torch.FloatTensor(bias).unsqueeze(dim=0) + else: # 'random' init + p0 = self.pos_init_method() + color_ref = copy.deepcopy(p0) + points.append(p0) + for j in range(num_segments): + radius = self.radius + p1 = (p0[0] + radius * np.random.uniform(-0.5, 0.5), + p0[1] + radius * np.random.uniform(-0.5, 0.5)) + p2 = (p1[0] + radius * np.random.uniform(-0.5, 0.5), + p1[1] + radius * np.random.uniform(-0.5, 0.5)) + p3 = (p2[0] + radius * np.random.uniform(-0.5, 0.5), + p2[1] + radius * np.random.uniform(-0.5, 0.5)) + points.append(p1) + points.append(p2) + if j < num_segments - 1: + points.append(p3) + p0 = p3 + points = torch.FloatTensor(points) + + path = pydiffvg.Path( + num_control_points=torch.LongTensor(num_control_points), + points=points, + stroke_width=torch.tensor(float(self.stroke_width)) if self.train_stroke else torch.tensor(0.0), + is_closed=True + ) + + self.strokes_counter += 1 + return path, color_ref + + def clip_curve_shape(self): + for group in self.shape_groups: + if self.train_stroke: + group.stroke_color.data.clamp_(0.0, 1.0) + else: + group.fill_color.data.clamp_(0.0, 1.0) + + def calc_distance_weight(self, loss_weight_keep): + shapes_forsdf = copy.deepcopy(self.cur_shapes) + shape_groups_forsdf = copy.deepcopy(self.cur_shape_groups) + for si in shapes_forsdf: + si.stroke_width = torch.FloatTensor([0]).to(self.device) + for sg_idx, sgi in enumerate(shape_groups_forsdf): + 
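+            # paint every copied shape with a solid white fill (stroke width was zeroed
+            # above) so the rasterized alpha channel yields a 0/1 coverage mask for the
+            # SDF-based loss weights computed below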
sgi.fill_color = torch.FloatTensor([1, 1, 1, 1]).to(self.device) + sgi.shape_ids = torch.LongTensor([sg_idx]).to(self.device) + + sargs_forsdf = pydiffvg.RenderFunction.serialize_scene( + self.canvas_width, self.canvas_height, shapes_forsdf, shape_groups_forsdf + ) + _render = pydiffvg.RenderFunction.apply + with torch.no_grad(): + im_forsdf = _render(self.canvas_width, # width + self.canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *sargs_forsdf) + + # use alpha channel is a trick to get 0-1 image + im_forsdf = (im_forsdf[:, :, 3]).detach().cpu().numpy() + loss_weight = get_sdf(im_forsdf, normalize='to1') + loss_weight += loss_weight_keep + loss_weight = np.clip(loss_weight, 0, 1) + loss_weight = torch.FloatTensor(loss_weight).to(self.device) + return loss_weight + + def set_parameters(self): + # stroke`s location optimization + self.points_vars = [] + for i, path in enumerate(self.cur_shapes): + path.points.requires_grad = True + self.points_vars.append(path.points) + + if self.train_stroke: + path.stroke_width.requires_grad = True + self.stroke_width_vars.append(path.stroke_width) + + # for stroke' color optimization + self.color_vars = [] + for i, group in enumerate(self.cur_shape_groups): + if self.train_stroke: + group.stroke_color.requires_grad = True + self.stroke_color_vars.append(group.stroke_color) + else: + group.fill_color.requires_grad = True + self.color_vars.append(group.fill_color) + + def get_point_parameters(self): + return self.points_vars + + def get_color_parameters(self): + return self.color_vars + + def get_stroke_parameters(self): + return self.stroke_width_vars, self.stroke_color_vars + + def get_bg_parameters(self): + return self.para_bg + + def save_svg(self, fpath): + pydiffvg.save_svg(f'{fpath}', + self.canvas_width, + self.canvas_height, + self.shapes, + self.shape_groups) + + +def get_sdf(phi, **kwargs): + import skfmm # local import + + phi = (phi - 0.5) * 2 + if (phi.max() <= 0) or (phi.min() >= 0): + return np.zeros(phi.shape).astype(np.float32) + sd = skfmm.distance(phi, dx=1) + + flip_negative = kwargs.get('flip_negative', True) + if flip_negative: + sd = np.abs(sd) + + truncate = kwargs.get('truncate', 10) + sd = np.clip(sd, -truncate, truncate) + # print(f"max sd value is: {sd.max()}") + + zero2max = kwargs.get('zero2max', True) + if zero2max and flip_negative: + sd = sd.max() - sd + elif zero2max: + raise ValueError + + normalize = kwargs.get('normalize', 'sum') + if normalize == 'sum': + sd /= sd.sum() + elif normalize == 'to1': + sd /= sd.max() + return sd + + +class SparseCoordInit: + + def __init__(self, pred, gt, format='[bs x c x 2D]', quantile_interval=200, nodiff_thres=0.1): + if torch.is_tensor(pred): + pred = pred.detach().cpu().numpy() + if torch.is_tensor(gt): + gt = gt.detach().cpu().numpy() + + if format == '[bs x c x 2D]': + self.map = ((pred[0] - gt[0]) ** 2).sum(0) + self.reference_gt = copy.deepcopy(np.transpose(gt[0], (1, 2, 0))) + elif format == ['[2D x c]']: + self.map = (np.abs(pred - gt)).sum(-1) + self.reference_gt = copy.deepcopy(gt[0]) + else: + raise ValueError + + # OptionA: Zero too small errors to avoid the error too small deadloop + self.map[self.map < nodiff_thres] = 0 + quantile_interval = np.linspace(0., 1., quantile_interval) + quantized_interval = np.quantile(self.map, quantile_interval) + # remove redundant + quantized_interval = np.unique(quantized_interval) + quantized_interval = sorted(quantized_interval[1:-1]) + self.map = np.digitize(self.map, quantized_interval, 
right=False) + self.map = np.clip(self.map, 0, 255).astype(np.uint8) + self.idcnt = {} + for idi in sorted(np.unique(self.map)): + self.idcnt[idi] = (self.map == idi).sum() + # remove smallest one to remove the correct region + self.idcnt.pop(min(self.idcnt.keys())) + + def __call__(self): + if len(self.idcnt) == 0: + h, w = self.map.shape + return [np.random.uniform(0, 1) * w, np.random.uniform(0, 1) * h] + + target_id = max(self.idcnt, key=self.idcnt.get) + _, component, cstats, ccenter = cv2.connectedComponentsWithStats( + (self.map == target_id).astype(np.uint8), + connectivity=4 + ) + # remove cid = 0, it is the invalid area + csize = [ci[-1] for ci in cstats[1:]] + target_cid = csize.index(max(csize)) + 1 + center = ccenter[target_cid][::-1] + coord = np.stack(np.where(component == target_cid)).T + dist = np.linalg.norm(coord - center, axis=1) + target_coord_id = np.argmin(dist) + coord_h, coord_w = coord[target_coord_id] + + # replace_sampling + self.idcnt[target_id] -= max(csize) + if self.idcnt[target_id] == 0: + self.idcnt.pop(target_id) + self.map[component == target_cid] = 0 + return [coord_w, coord_h] + + +class RandomCoordInit: + def __init__(self, canvas_width, canvas_height): + self.canvas_width, self.canvas_height = canvas_width, canvas_height + + def __call__(self): + w, h = self.canvas_width, self.canvas_height + return [np.random.uniform(0, 1) * w, np.random.uniform(0, 1) * h] + + +class NaiveCoordInit: + def __init__(self, pred, gt, format='[bs x c x 2D]', replace_sampling=True): + if isinstance(pred, torch.Tensor): + pred = pred.detach().cpu().numpy() + if isinstance(gt, torch.Tensor): + gt = gt.detach().cpu().numpy() + + if format == '[bs x c x 2D]': + self.map = ((pred[0] - gt[0]) ** 2).sum(0) + elif format == ['[2D x c]']: + self.map = ((pred - gt) ** 2).sum(-1) + else: + raise ValueError + self.replace_sampling = replace_sampling + + def __call__(self): + coord = np.where(self.map == self.map.max()) + coord_h, coord_w = coord[0][0], coord[1][0] + if self.replace_sampling: + self.map[coord_h, coord_w] = -1 + return [coord_w, coord_h] + + +class LinearDecayLR: + + def __init__(self, decay_every, decay_ratio): + self.decay_every = decay_every + self.decay_ratio = decay_ratio + + def __call__(self, n): + decay_time = n // self.decay_every + decay_step = n % self.decay_every + lr_s = self.decay_ratio ** decay_time + lr_e = self.decay_ratio ** (decay_time + 1) + r = decay_step / self.decay_every + lr = lr_s * (1 - r) + lr_e * r + return lr + + +class PainterOptimizer: + + def __init__(self, + renderer: Painter, + num_iter: int, + lr_config: DictConfig, + trainable_stroke: bool = False, + trainable_bg: bool = False): + self.renderer = renderer + self.num_iter = num_iter + self.trainable_stroke = trainable_stroke + self.trainable_bg = trainable_bg + + self.lr_base = { + 'point': lr_config.point, + 'color': lr_config.color, + 'stroke_width': lr_config.stroke_width, + 'stroke_color': lr_config.stroke_color, + 'bg': lr_config.bg + } + + self.learnable_params = [] # list[Dict] + + self.optimizer = None + self.scheduler = None + + def init_optimizers(self): + # optimizers + params = {} + self.renderer.set_parameters() + params['point'] = self.renderer.get_point_parameters() + if self.trainable_stroke: + params['stroke_width'], params['stroke_color'] = self.renderer.get_stroke_parameters() + else: + params['color'] = self.renderer.get_color_parameters() + + if self.trainable_bg: + params['bg'] = self.renderer.get_bg_parameters() + + self.learnable_params = [ + {'params': 
params[ki], 'lr': self.lr_base[ki]} for ki in sorted(params.keys()) + ] + self.optimizer = torch.optim.Adam(self.learnable_params) + # lr schedule + lr_lambda_fn = LinearDecayLR(self.num_iter, 0.4) + self.scheduler = LambdaLR(self.optimizer, lr_lambda=lr_lambda_fn, last_epoch=-1) + + def update_params(self, name: str, value: torch.tensor): + for param_group in self.learnable_params: + if param_group.get('_id') == name: + param_group['params'] = value + + def update_lr(self): + self.scheduler.step() + + def zero_grad_(self): + self.optimizer.zero_grad() + + def step_(self): + self.optimizer.step() + + def get_lr(self): + return self.optimizer.param_groups[0]['lr'] diff --git a/pytorch_svgrender/painter/live/xing_loss.py b/pytorch_svgrender/painter/live/xing_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..43d33cf7559a7c93f7f62aaf91d5354d15295c16 --- /dev/null +++ b/pytorch_svgrender/painter/live/xing_loss.py @@ -0,0 +1,66 @@ +import torch + + +def area(a, b, c): + return (c[1] - a[1]) * (b[0] - a[0]) - (b[1] - a[1]) * (c[0] - a[0]) + + +def triangle_area(A, B, C): + out = (C - A).flip([-1]) * (B - A) + out = out[..., 1] - out[..., 0] + return out + + +def compute_sine_theta(s1, s2): # s1 and s2 aret two segments to be uswed + # s1, s2 (2, 2) + v1 = s1[1, :] - s1[0, :] + v2 = s2[1, :] - s2[0, :] + # print(v1, v2) + sine_theta = (v1[0] * v2[1] - v1[1] * v2[0]) / (torch.norm(v1) * torch.norm(v2)) + return sine_theta + + +def xing_loss_fn(x_list, scale=1e-3): # x[npoints, 2] + loss = 0. + # print(f"points_len: {len(x_list)}") + for x in x_list: + # print(f"x: {x}") + seg_loss = 0. + N = x.size()[0] + assert N % 3 == 0, f'The segment number ({N}) is not correct!' + x = torch.cat([x, x[0, :].unsqueeze(0)], dim=0) # (N+1,2) + segments = torch.cat([x[:-1, :].unsqueeze(1), x[1:, :].unsqueeze(1)], dim=1) # (N, start/end, 2) + segment_num = int(N / 3) + for i in range(segment_num): + cs1 = segments[i * 3, :, :] # start control segs + cs2 = segments[i * 3 + 1, :, :] # middle control segs + cs3 = segments[i * 3 + 2, :, :] # end control segs + # print('the direction of the vectors:') + # print(compute_sine_theta(cs1, cs2)) + direct = (compute_sine_theta(cs1, cs2) >= 0).float() + opst = 1 - direct # another direction + sina = compute_sine_theta(cs1, cs3) # the angle between cs1 and cs3 + seg_loss += direct * torch.relu(- sina) + opst * torch.relu(sina) + # print(direct, opst, sina) + seg_loss /= segment_num + + templ = seg_loss + loss += templ * scale # area_loss * scale + + return loss / (len(x_list)) + + +if __name__ == "__main__": + # x = torch.rand([6, 2]) + # x = torch.tensor([[0,0], [1,1], [2,1], [1.5,0]]) + x = torch.tensor([[0, 0], [1, 1], [2, 1], [0.5, 0]]) + # x = torch.tensor([[1,0], [2,1], [0,1], [2,0]]) + scale = 1 # 0.5 + y = xing_loss_fn([x], scale) + print(y) + + x = torch.tensor([[0, 0], [1, 1], [2, 1], [2., 0]]) + # x = torch.tensor([[1,0], [2,1], [0,1], [2,0]]) + scale = 1 # 0.5 + y = xing_loss_fn([x], scale) + print(y) diff --git a/pytorch_svgrender/painter/style_clipdraw/__init__.py b/pytorch_svgrender/painter/style_clipdraw/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3eb1940a6d05ac74768b0ebb0152d0dcf253ee33 --- /dev/null +++ b/pytorch_svgrender/painter/style_clipdraw/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: + +from .painter_params import Painter, PainterOptimizer +from .strotss import StyleLoss, VGG16Extractor, sample_indices + +__all__ = [ + 'Painter', 'PainterOptimizer', + 'StyleLoss', 'VGG16Extractor', 'sample_indices' +] diff --git a/pytorch_svgrender/painter/style_clipdraw/painter_params.py b/pytorch_svgrender/painter/style_clipdraw/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..fccbaccf49258c559acce1e8af13aafcb10e3699 --- /dev/null +++ b/pytorch_svgrender/painter/style_clipdraw/painter_params.py @@ -0,0 +1,208 @@ +import random +import copy +import pathlib + +import omegaconf +import pydiffvg +import torch + +from pytorch_svgrender.diffvg_warp import DiffVGState + + +class Painter(DiffVGState): + + def __init__( + self, + method_cfg: omegaconf.DictConfig, + diffvg_cfg: omegaconf.DictConfig, + num_strokes: int = 4, + canvas_size: int = 224, + device: torch.device = None, + ): + super(Painter, self).__init__(device, print_timing=diffvg_cfg.print_timing, + canvas_width=canvas_size, canvas_height=canvas_size) + + self.num_paths = num_strokes + self.max_width = method_cfg.max_width + self.num_stages = method_cfg.num_stages + + self.black_stroke_color = method_cfg.black_stroke_color + + self.path_svg = method_cfg.path_svg + self.strokes_per_stage = self.num_paths + self.optimize_flag = [] + + self.strokes_counter = 0 # counts the number of calls to "get_path" + + def init_image(self, stage=0): + if stage > 0: + # Noting: if multi stages training than add new strokes on existing ones + # don't optimize on previous strokes + self.optimize_flag = [False for i in range(len(self.shapes))] + for i in range(self.strokes_per_stage): + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + path = self.get_path() + self.shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + self.shape_groups.append(path_group) + self.optimize_flag.append(True) + else: + num_paths_exists = 0 + if self.path_svg is not None and pathlib.Path(self.path_svg).exists(): + print(f"-> init svg from `{self.path_svg}` ...") + + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups = self.load_svg(self.path_svg) + # if you want to add more strokes to existing ones and optimize on all of them + num_paths_exists = len(self.shapes) + + for i in range(num_paths_exists, self.num_paths): + if self.black_stroke_color: + stroke_color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + else: + stroke_color = torch.tensor([random.random(), random.random(), random.random(), random.random()]) + path = self.get_path() + self.shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color) + self.shape_groups.append(path_group) + self.optimize_flag = [True for i in range(len(self.shapes))] + + img = self.render_warp() + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + + return img + + def get_image(self, step=0): + img = self.render_warp(step) + + opacity = img[:, :, 3:4] + img = opacity * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - opacity) + img = img[:, :, :3] + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = 
img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_path(self): + num_segments = random.randint(1, 3) + num_control_points = torch.zeros(num_segments, dtype=torch.int32) + 2 + points = [] + p0 = (random.random(), random.random()) + points.append(p0) + + for j in range(num_segments): + radius = 0.1 + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5)) + p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5)) + points.append(p1) + points.append(p2) + points.append(p3) + p0 = p3 + points = torch.tensor(points).to(self.device) + points[:, 0] *= self.canvas_width + points[:, 1] *= self.canvas_height + + path = pydiffvg.Path(num_control_points=num_control_points, + points=points, + stroke_width=torch.tensor(1.0), + is_closed=False) + self.strokes_counter += 1 + return path + + def clip_curve_shape(self): + for path in self.shapes: + path.stroke_width.data.clamp_(1.0, self.max_width) + for group in self.shape_groups: + group.stroke_color.data.clamp_(0.0, 1.0) + + @torch.no_grad() + def render_scaled(self, scale_factor=4): + """ + Scale the size of the rendered image + """ + _shapes = self.shapes + _shape_groups = self.shape_groups + + shapes_resized = copy.deepcopy(_shapes) + for i in range(len(_shapes)): + shapes_resized[i].stroke_width = _shapes[i].stroke_width * scale_factor + for j in range(len(_shapes[i].points)): + shapes_resized[i].points[j] = _shapes[i].points[j] * scale_factor + + # rescale the rendered image + self.shapes = shapes_resized + self.canvas_height = self.canvas_height * scale_factor + self.canvas_width = self.canvas_width * scale_factor + + def set_parameters(self): + # stroke`s location and width optimization + self.point_vars = [] + self.width_vars = [] + for i, path in enumerate(self.shapes): + if self.optimize_flag[i]: + path.points.requires_grad = True + self.point_vars.append(path.points) + path.stroke_width.requires_grad = True + self.width_vars.append(path.stroke_width) + + # for stroke' color optimization + self.color_vars = [] + for i, group in enumerate(self.shape_groups): + if self.optimize_flag[i]: + group.stroke_color.requires_grad = True + self.color_vars.append(group.stroke_color) + + return self.point_vars, self.width_vars, self.color_vars + + def save_svg(self, output_dir, name): + pydiffvg.save_svg('{}/{}.svg'.format(output_dir, name), + self.canvas_width, + self.canvas_height, + self.shapes, + self.shape_groups) + + +class PainterOptimizer: + + def __init__(self, renderer: Painter, points_lr: float, width_lr: float, color_lr: float): + self.renderer = renderer + + self.points_lr = points_lr + self.width_lr = width_lr + self.color_lr = color_lr + + self.points_optimizer, self.width_optimizer, self.color_optimizer = None, None, None + + def init_optimizers(self): + points_vars, stroke_width_vars, color_vars = self.renderer.set_parameters() + self.points_optimizer = torch.optim.Adam(points_vars, lr=self.points_lr) + self.width_optimizer = torch.optim.Adam(stroke_width_vars, lr=self.width_lr) + self.color_optimizer = torch.optim.Adam(color_vars, lr=self.color_lr) + + def update_lr(self, step, decay_steps=(500, 750)): + if step % decay_steps[0] == 0: + for param_group in self.points_optimizer.param_groups: + param_group['lr'] = 0.4 + if step % decay_steps[1] == 0: + for param_group in self.points_optimizer.param_groups: + param_group['lr'] = 0.1 + + def zero_grad_(self): 
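+        # clear accumulated gradients on all three optimizers (points, stroke width, color)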
+ self.points_optimizer.zero_grad() + self.width_optimizer.zero_grad() + self.color_optimizer.zero_grad() + + def step_(self): + self.points_optimizer.step() + self.width_optimizer.step() + self.color_optimizer.step() + + def get_lr(self): + return self.points_optimizer.param_groups[0]['lr'] diff --git a/pytorch_svgrender/painter/style_clipdraw/strotss.py b/pytorch_svgrender/painter/style_clipdraw/strotss.py new file mode 100644 index 0000000000000000000000000000000000000000..c56348dc5460e3894d18a16bfef364ea3e65c011 --- /dev/null +++ b/pytorch_svgrender/painter/style_clipdraw/strotss.py @@ -0,0 +1,253 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +import math + +import torch +import torch.nn as nn +import torchvision +import numpy as np + + +class VGG16Extractor(nn.Module): + def __init__(self, space): + super().__init__() + # load pretrained model + self.vgg_layers = torchvision.models.vgg16( + weights=torchvision.models.VGG16_Weights.DEFAULT + ).features + + for param in self.parameters(): + param.requires_grad = False + self.capture_layers = [1, 3, 6, 8, 11, 13, 15, 22, 29] + self.space = space + + def forward_base(self, x): + feat = [x] + for i in range(len(self.vgg_layers)): + x = self.vgg_layers[i](x) + if i in self.capture_layers: + feat.append(x) + return feat + + def forward(self, x): + if self.space != 'vgg': + x = (x + 1.) / 2. + x = x - (torch.Tensor([0.485, 0.456, 0.406]).to(x.device).view(1, -1, 1, 1)) + x = x / (torch.Tensor([0.229, 0.224, 0.225]).to(x.device).view(1, -1, 1, 1)) + feat = self.forward_base(x) + return feat + + def forward_samples_hypercolumn(self, X, samps=100): + feat = self.forward(X) + + xx, xy = np.meshgrid(np.arange(X.shape[2]), np.arange(X.shape[3])) + xx = np.expand_dims(xx.flatten(), 1) + xy = np.expand_dims(xy.flatten(), 1) + xc = np.concatenate([xx, xy], 1) + + samples = min(samps, xc.shape[0]) + + np.random.shuffle(xc) + xx = xc[:samples, 0] + yy = xc[:samples, 1] + + feat_samples = [] + for i in range(len(feat)): + + layer_feat = feat[i] + + # hack to detect lower resolution + if i > 0 and feat[i].size(2) < feat[i - 1].size(2): + xx = xx / 2.0 + yy = yy / 2.0 + + xx = np.clip(xx, 0, layer_feat.shape[2] - 1).astype(np.int32) + yy = np.clip(yy, 0, layer_feat.shape[3] - 1).astype(np.int32) + + features = layer_feat[:, :, xx[range(samples)], yy[range(samples)]] + feat_samples.append(features.clone().detach()) + + feat = torch.cat(feat_samples, 1) + return feat + + +class StyleLoss: + + def spatial_feature_extract(self, feat_result, feat_content, xx, xy): + l2, l3 = [], [] + device = feat_result[0].device + + # for each extracted layer + for i in range(len(feat_result)): + fr = feat_result[i] + fc = feat_content[i] + + # hack to detect reduced scale + if i > 0 and feat_result[i - 1].size(2) > feat_result[i].size(2): + xx = xx / 2.0 + xy = xy / 2.0 + + # go back to ints and get residual + xxm = np.floor(xx).astype(np.float32) + xxr = xx - xxm + + xym = np.floor(xy).astype(np.float32) + xyr = xy - xym + + # do bilinear resample + w00 = torch.from_numpy((1. - xxr) * (1. - xyr)).float().view(1, 1, -1, 1).to(device) + w01 = torch.from_numpy((1. - xxr) * xyr).float().view(1, 1, -1, 1).to(device) + w10 = torch.from_numpy(xxr * (1. 
- xyr)).float().view(1, 1, -1, 1).to(device) + w11 = torch.from_numpy(xxr * xyr).float().view(1, 1, -1, 1).to(device) + + xxm = np.clip(xxm.astype(np.int32), 0, fr.size(2) - 1) + xym = np.clip(xym.astype(np.int32), 0, fr.size(3) - 1) + + s00 = xxm * fr.size(3) + xym + s01 = xxm * fr.size(3) + np.clip(xym + 1, 0, fr.size(3) - 1) + s10 = np.clip(xxm + 1, 0, fr.size(2) - 1) * fr.size(3) + (xym) + s11 = np.clip(xxm + 1, 0, fr.size(2) - 1) * fr.size(3) + np.clip(xym + 1, 0, fr.size(3) - 1) + + fr = fr.view(1, fr.size(1), fr.size(2) * fr.size(3), 1) + fr = fr[:, :, s00, :].mul_(w00).add_(fr[:, :, s01, :].mul_(w01)).add_(fr[:, :, s10, :].mul_(w10)).add_( + fr[:, :, s11, :].mul_(w11)) + + fc = fc.view(1, fc.size(1), fc.size(2) * fc.size(3), 1) + fc = fc[:, :, s00, :].mul_(w00).add_(fc[:, :, s01, :].mul_(w01)).add_(fc[:, :, s10, :].mul_(w10)).add_( + fc[:, :, s11, :].mul_(w11)) + + l2.append(fr) + l3.append(fc) + + x_st = torch.cat([li.contiguous() for li in l2], 1) + c_st = torch.cat([li.contiguous() for li in l3], 1) + + xx = torch.from_numpy(xx).view(1, 1, x_st.size(2), 1).float().to(device) + yy = torch.from_numpy(xy).view(1, 1, x_st.size(2), 1).float().to(device) + + x_st = torch.cat([x_st, xx, yy], 1) + c_st = torch.cat([c_st, xx, yy], 1) + return x_st, c_st + + def rgb_to_yuv(self, rgb): + C = torch.Tensor( + [[0.577350, 0.577350, 0.577350], [-0.577350, 0.788675, -0.211325], [-0.577350, -0.211325, 0.788675]] + ).to(rgb.device) + yuv = torch.mm(C, rgb) + return yuv + + def pairwise_distances_cos(self, x, y): + x_norm = torch.sqrt((x ** 2).sum(1).view(-1, 1)) + y_t = torch.transpose(y, 0, 1) + y_norm = torch.sqrt((y ** 2).sum(1).view(1, -1)) + dist = 1. - torch.mm(x, y_t) / x_norm / y_norm + return dist + + def pairwise_distances_sq_l2(self, x, y): + x_norm = (x ** 2).sum(1).view(-1, 1) + y_t = torch.transpose(y, 0, 1) + y_norm = (y ** 2).sum(1).view(1, -1) + dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t) + return torch.clamp(dist, 1e-5, 1e5) / x.size(1) + + def distmat(self, x, y, cos_d=True): + if cos_d: + M = self.pairwise_distances_cos(x, y) + else: + M = torch.sqrt(self.pairwise_distances_sq_l2(x, y)) + return M + + def style_loss(self, X, Y): + d = X.shape[1] + + if d == 3: + X = self.rgb_to_yuv(X.transpose(0, 1).contiguous().view(d, -1)).transpose(0, 1) + Y = self.rgb_to_yuv(Y.transpose(0, 1).contiguous().view(d, -1)).transpose(0, 1) + else: + X = X.transpose(0, 1).contiguous().view(d, -1).transpose(0, 1) + Y = Y.transpose(0, 1).contiguous().view(d, -1).transpose(0, 1) + + # Relaxed EMD + CX_M = self.distmat(X, Y, cos_d=True) + + if d == 3: + CX_M = CX_M + self.distmat(X, Y, cos_d=False) + + m1, m1_inds = CX_M.min(1) + m2, m2_inds = CX_M.min(0) + + remd = torch.max(m1.mean(), m2.mean()) + + return remd + + def moment_loss(self, X, Y, moments=[1, 2]): + loss = 0. 
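+        # match the first moment (mean) and second moment (covariance) of the sampled
+        # feature vectors; each term is the mean absolute difference of the statistics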
+ X = X.squeeze().t() + Y = Y.squeeze().t() + + mu_x = torch.mean(X, 0, keepdim=True) + mu_y = torch.mean(Y, 0, keepdim=True) + mu_d = torch.abs(mu_x - mu_y).mean() + + if 1 in moments: + loss = loss + mu_d + + if 2 in moments: + X_c = X - mu_x + Y_c = Y - mu_y + X_cov = torch.mm(X_c.t(), X_c) / (X.shape[0] - 1) + Y_cov = torch.mm(Y_c.t(), Y_c) / (Y.shape[0] - 1) + + D_cov = torch.abs(X_cov - Y_cov).mean() + loss = loss + D_cov + + return loss + + def forward(self, feat_result, feat_content, feat_style, indices, content_weight, moment_weight=1.0): + # spatial feature extract + num_locations = 1024 + spatial_result, spatial_content = self.spatial_feature_extract( + feat_result, feat_content, indices[0][:num_locations], indices[1][:num_locations] + ) + + # loss_content = content_loss(spatial_result, spatial_content) + + d = feat_style.shape[1] + spatial_style = feat_style.view(1, d, -1, 1) + feat_max = 3 + 2 * 64 + 128 * 2 + 256 * 3 + 512 * 2 # (sum of all extracted channels) + + loss_remd = self.style_loss(spatial_result[:, :feat_max, :, :], spatial_style[:, :feat_max, :, :]) + + loss_moment = self.moment_loss(spatial_result[:, :-2, :, :], + spatial_style, + moments=[1, 2]) # -2 is so that it can fit? + # palette matching + content_weight_frac = 1. / max(content_weight, 1.) + loss_moment += content_weight_frac * self.style_loss(spatial_result[:, :3, :, :], spatial_style[:, :3, :, :]) + + loss_style = loss_remd + moment_weight * loss_moment + # print(f'Style: {loss_style.item():.3f}, Content: {loss_content.item():.3f}') + + style_weight = 1.0 + moment_weight + loss_total = (loss_style) / (content_weight + style_weight) + return loss_total + + +def sample_indices(feat_content, feat_style): + const = 128 ** 2 # 32k or so + big_size = feat_content.shape[2] * feat_content.shape[3] # num feaxels + + stride_x = int(max(math.floor(math.sqrt(big_size // const)), 1)) + offset_x = np.random.randint(stride_x) + stride_y = int(max(math.ceil(math.sqrt(big_size // const)), 1)) + offset_y = np.random.randint(stride_y) + xx, xy = np.meshgrid( + np.arange(feat_content.shape[2])[offset_x::stride_x], + np.arange(feat_content.shape[3])[offset_y::stride_y] + ) + xx = xx.flatten() + xy = xy.flatten() + return xx, xy diff --git a/pytorch_svgrender/painter/svgdreamer/VPSD_pipeline.py b/pytorch_svgrender/painter/svgdreamer/VPSD_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..29f1d1f4b164bae3082a9efce053cfebcacc7615 --- /dev/null +++ b/pytorch_svgrender/painter/svgdreamer/VPSD_pipeline.py @@ -0,0 +1,578 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: +import re +from typing import Any, List, Optional, Union, Dict +from omegaconf import DictConfig + +import torch +import torch.nn.functional as F +from torchvision import transforms +from diffusers import StableDiffusionPipeline, UNet2DConditionModel +from diffusers import DDIMScheduler +from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import ( + rescale_noise_cfg, StableDiffusionPipelineOutput) +from diffusers.models.attention_processor import LoRAAttnProcessor +from diffusers.loaders import AttnProcsLayers +from pytorch_svgrender.diffusers_warp import init_StableDiffusion_pipeline, init_diffusers_unet + + +class VectorizedParticleSDSPipeline(torch.nn.Module): + + def __init__(self, args: DictConfig, diffuser_cfg: DictConfig, guidance_cfg: DictConfig, device: torch.device): + super().__init__() + self.args = args + self.device = device + assert guidance_cfg.n_particle >= guidance_cfg.vsd_n_particle + assert guidance_cfg.n_particle >= guidance_cfg.phi_n_particle + + pipe_kwargs = { + "device": self.device, + "torch_dtype": torch.float32, + "local_files_only": not diffuser_cfg.download, + "force_download": diffuser_cfg.force_download, + "resume_download": diffuser_cfg.resume_download, + "ldm_speed_up": args.x.ldm_speed_up, + "enable_xformers": args.x.enable_xformers, + "gradient_checkpoint": args.x.gradient_checkpoint, + "cpu_offload": args.x.cpu_offload, + "vae_slicing": False + } + + # load pretrained model + self.sd_pipeline = init_StableDiffusion_pipeline( + args.x.model_id, + custom_pipeline=StableDiffusionPipeline, + custom_scheduler=DDIMScheduler, + **pipe_kwargs + ) + # disable grads + self.sd_pipeline.vae.requires_grad_(False) + self.sd_pipeline.text_encoder.requires_grad_(False) + self.sd_pipeline.unet.requires_grad_(False) + # set components + self.vae = self.sd_pipeline.vae + self.unet = self.sd_pipeline.unet + self.scheduler = self.sd_pipeline.scheduler + self.tokenizer = self.sd_pipeline.tokenizer + self.text_encoder = self.sd_pipeline.text_encoder + + if guidance_cfg.phi_model == 'lora': + if guidance_cfg.phi_single: # default, use the single unet + # load LoRA model from the pretrained model + unet_ = self.unet + else: + # create a new unet model + pipe_kwargs.pop('cpu_offload') + pipe_kwargs.pop('vae_slicing') + unet_ = init_diffusers_unet(args.x.model_id, **pipe_kwargs) + + # set correct LoRA layers + self.unet_phi, phi_model_layers = self.set_lora_layers(unet_) + self.phi_params = list(phi_model_layers.parameters()) + self.lora_cross_attention_kwargs = {"scale": guidance_cfg.lora_attn_scale} \ + if guidance_cfg.use_attn_scale else {} + self.vae_phi = self.vae + self.vae_phi.requires_grad_(False) + + elif guidance_cfg.phi_model == 'unet_simple': + self.unet_phi = UNet2DConditionModel( + sample_size=64, + in_channels=4, + out_channels=4, + layers_per_block=1, + block_out_channels=(128, 256, 384, 512), + down_block_types=( + "DownBlock2D", + "AttnDownBlock2D", + "AttnDownBlock2D", + "AttnDownBlock2D", + ), + up_block_types=( + "AttnUpBlock2D", + "AttnUpBlock2D", + "AttnUpBlock2D", + "UpBlock2D", + ), + cross_attention_dim=self.unet.config.cross_attention_dim + ).to(device) + self.phi_params = list(self.unet_phi.parameters()) + self.vae_phi = self.vae + # reset lora + guidance_cfg.use_attn_scale = False + guidance_cfg.lora_attn_scale = False + + # hyper-params + self.phi_single = guidance_cfg.phi_single + self.guidance_scale: float = guidance_cfg.guidance_scale + self.guidance_scale_lora: float = 
guidance_cfg.phi_guidance_scale + self.grad_clip_val: Union[float, None] = guidance_cfg.grad_clip_val + self.vsd_n_particle: int = guidance_cfg.vsd_n_particle + self.phi_n_particle: int = guidance_cfg.phi_n_particle + self.t_schedule: str = guidance_cfg.t_schedule + self.t_range = list(guidance_cfg.t_range) + print( + f'n_particles: {guidance_cfg.n_particle}, ' + f'enhance_particles: {guidance_cfg.particle_aug}, ' + f'n_particles of score: {self.vsd_n_particle}, ' + f'n_particles of phi_model: {self.phi_n_particle}, \n' + f't_range: {self.t_range}, ' + f't_schedule: {self.t_schedule}, \n' + f'guidance_scale: {self.guidance_scale}, phi_guidance_scale: {self.guidance_scale_lora}.' + ) + print(f"phi_model: {guidance_cfg.phi_model}, " + f"use lora_cross_attn: {guidance_cfg.use_attn_scale}, " + f"lora_attn_scale: {guidance_cfg.lora_attn_scale}. \n") + + # for convenience + self.num_train_timesteps = self.scheduler.config.num_train_timesteps + self.alphas = self.scheduler.alphas_cumprod.to(self.device) + self.text_embeddings = None + self.text_embedd_cond, self.text_embedd_uncond = None, None + self.text_embeddings_phi = None + self.t = None + + def set_lora_layers(self, unet): # set correct lora layers + lora_attn_procs = {} + for name in unet.attn_processors.keys(): + cross_attention_dim = None if name.endswith("attn1.processor") \ + else unet.config.cross_attention_dim + if name.startswith("mid_block"): + hidden_size = unet.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(unet.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = unet.config.block_out_channels[block_id] + + lora_attn_procs[name] = LoRAAttnProcessor( + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim + ).to(self.device) + unet.set_attn_processor(lora_attn_procs) + lora_layers = AttnProcsLayers(unet.attn_processors) + + unet.requires_grad_(False) + for param in lora_layers.parameters(): + param.requires_grad_(True) + return unet, lora_layers + + @torch.no_grad() + def encode_prompt(self, + prompt, + device, + do_classifier_free_guidance, + negative_prompt=None): + # text conditional embed + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + prompt_embeds = self.text_encoder(text_inputs.input_ids.to(device))[0] + + if do_classifier_free_guidance: + if negative_prompt is None: + uncond_tokens = [""] + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + else: + uncond_tokens = negative_prompt + + # unconditional embed + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=prompt_embeds.shape[1], + truncation=True, + return_tensors="pt", + ) + negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(device))[0] + + concat_prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + return concat_prompt_embeds, negative_prompt_embeds, prompt_embeds + + return prompt_embeds, None, None + + def sampling(self, + vae, + unet, + scheduler, + prompt: Union[str, List[str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, 
List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0): + # 0. Default height and width to unet + vae_scale_factor = 2 ** (len(vae.config.block_out_channels) - 1) + height = height or unet.config.sample_size * vae_scale_factor + width = width or unet.config.sample_size * vae_scale_factor + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = 1 + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + prompt_embeds, _, _ = self.encode_prompt( + prompt, + self.device, + do_classifier_free_guidance, + negative_prompt, + ) + + # 4. Prepare timesteps + scheduler.set_timesteps(num_inference_steps, device=self.device) + timesteps = scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = unet.config.in_channels + latents = self.sd_pipeline.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + self.device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.sd_pipeline.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.sd_pipeline.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = unet( + latent_model_input, + t, + encoder_hidden_states=prompt_embeds, + cross_attention_kwargs=cross_attention_kwargs, + return_dict=False, + )[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + if do_classifier_free_guidance and guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) + + # compute the previous noisy sample x_t -> x_t-1 + latents = scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] + + # update progress_bar + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + if not output_type == "latent": + image = vae.decode(latents / vae.config.scaling_factor, return_dict=False)[0] + image, has_nsfw_concept = self.sd_pipeline.run_safety_checker(image, self.device, prompt_embeds.dtype) + else: + image = latents + has_nsfw_concept = None + + if has_nsfw_concept is None: + do_denormalize = [True] * image.shape[0] + else: + do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] + + image = self.sd_pipeline.image_processor.postprocess(image, output_type=output_type, + do_denormalize=do_denormalize) + # Offload last model to CPU + if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: + self.final_offload_hook.offload() + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + def sample(self, + prompt, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil"): + return self.sampling(self.vae, self.unet, self.scheduler, + prompt=prompt, + height=height, width=width, + num_inference_steps=num_inference_steps, + guidance_scale=self.guidance_scale, + generator=generator, + output_type=output_type) + + def sample_lora(self, + prompt, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + output_type: Optional[str] = "pil"): + return self.sampling(self.vae_phi, self.unet_phi, self.scheduler, + prompt=prompt, + height=height, width=width, + num_inference_steps=num_inference_steps, + guidance_scale=self.guidance_scale_lora, + generator=generator, + cross_attention_kwargs=self.lora_cross_attention_kwargs, + output_type=output_type) + + def encode2latent(self, images): + images = (2 * images - 1).clamp(-1.0, 1.0) # images: [B, 3, H, W] + # encode images + latents = self.vae.encode(images).latent_dist.sample() + latents = self.vae.config.scaling_factor * latents + return latents + + def get_noise_map(self, noise_pred, guidance_scale=7.5, use_cfg=True): + if use_cfg: + noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + noise_map = noise_pred_uncond + guidance_scale * (noise_pred_pos - noise_pred_uncond) + return noise_map + else: + return noise_pred + + def train_phi_model(self, + pred_rgb: torch.Tensor, + new_timesteps: bool = False, + as_latent: bool = False): + # interp to 512x512 to be fed into vae. + if as_latent: + latents = pred_rgb + else: + pred_rgb_ = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False) + # encode image into latents with vae, requires grad! 
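+            # the sampled latents are detached below, so the phi (LoRA) model is trained
+            # on fixed copies of the current renders rather than through the VAE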
+ latents = self.encode2latent(pred_rgb_) + + # get phi particles + indices = torch.randperm(latents.size(0)) + latents_phi = latents[indices[:self.phi_n_particle]] + latents_phi = latents_phi.detach() + + # get timestep + if new_timesteps: + t = torch.randint(0, self.num_train_timesteps, (1,), device=self.device) + else: + t = self.t + + noise = torch.randn_like(latents_phi) + noisy_latents = self.scheduler.add_noise(latents_phi, noise, t) + + if self.scheduler.config.prediction_type == "epsilon": + target = noise + elif self.scheduler.config.prediction_type == "v_prediction": + target = self.scheduler.get_velocity(latents_phi, noise, t) + else: + raise ValueError(f"Unknown prediction type {self.scheduler.config.prediction_type}") + + # predict the noise residual and compute loss + noise_pred = self.unet_phi( + noisy_latents, t, + encoder_hidden_states=self.text_embeddings_phi, + cross_attention_kwargs=self.lora_cross_attention_kwargs, + ).sample + + return F.mse_loss(noise_pred, target, reduction="mean") + + def train_phi_model_refl(self, + pred_rgb: torch.Tensor, + weight: float = 1, + new_timesteps: bool = True): + # interp to 512x512 to be fed into vae. + pred_rgb_ = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False) + # encode image into latents with vae, requires grad! + latents = self.encode2latent(pred_rgb_) + + # get phi particles + indices = torch.randperm(latents.size(0)) + latents_phi = latents[indices[:self.phi_n_particle]] + latents_phi = latents_phi.detach() + + # get timestep + if new_timesteps: + t = torch.randint(0, self.num_train_timesteps, (1,), device=self.device) + else: + t = self.t + + noise = torch.randn_like(latents_phi) + noisy_latents = self.scheduler.add_noise(latents_phi, noise, t) + + if self.scheduler.config.prediction_type == "epsilon": + target = noise + elif self.scheduler.config.prediction_type == "v_prediction": + target = self.scheduler.get_velocity(latents_phi, noise, t) + else: + raise ValueError(f"Unknown prediction type {self.scheduler.config.prediction_type}") + + # predict the noise residual and compute loss + noise_pred = self.unet_phi( + noisy_latents, t, + encoder_hidden_states=self.text_embedd_cond, + cross_attention_kwargs=self.lora_cross_attention_kwargs, + ).sample + + rewards = torch.tensor(weight, dtype=torch.float32, device=self.device) + return rewards * F.mse_loss(noise_pred, target, reduction="mean") + + def schedule_timestep(self, step): + min_step = int(self.num_train_timesteps * self.t_range[0]) + max_step = int(self.num_train_timesteps * self.t_range[1]) + if self.t_schedule == 'randint': + t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device) + elif re.match(r"max_([\d.]+)_(\d+)", self.t_schedule): + # Anneal time schedule + # e.g: t_schedule == 'max_0.5_200' + # [0.02, 0.98] -> [0.02, 0.5] after 200 steps + tag, t_val, step_upd = str(self.t_schedule).split('_') + t_val, step_upd = float(t_val), int(step_upd) + if step >= step_upd: + max_step = int(self.num_train_timesteps * t_val) + t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device) + elif re.match(r"min_([\d.]+)_(\d+)", self.t_schedule): + # Anneal time schedule + # e.g: t_schedule == 'min_0.5_200' + # [0.02, 0.98] -> [0.5, 0.98] after 200 steps + tag, t_val, step_upd = str(self.t_schedule).split('_') + t_val, step_upd = float(t_val), int(step_upd) + if step >= step_upd: + min_step = int(self.num_train_timesteps * t_val) + t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, 
device=self.device) + else: + raise NotImplementedError(f"{self.t_schedule} is not support.") + return t + + def set_text_embeddings(self, prompt, negative_prompt, do_classifier_free_guidance): + if self.text_embeddings is not None: + return + + # encode text prompt + text_embeddings, text_embeddings_uncond, text_embeddings_cond = \ + self.encode_prompt(prompt, self.device, do_classifier_free_guidance, negative_prompt=negative_prompt) + + # set pretrained model text embedding + text_embeddings_uncond, text_embeddings_cond = text_embeddings.chunk(2) + self.text_embedd_uncond, self.text_embedd_cond = text_embeddings_uncond, text_embeddings_cond + text_embeddings_unconds = text_embeddings_uncond.repeat_interleave(self.vsd_n_particle, dim=0) + text_embeddings_conds = text_embeddings_cond.repeat_interleave(self.vsd_n_particle, dim=0) + text_embeddings = torch.cat([text_embeddings_unconds, text_embeddings_conds]) + self.text_embeddings = text_embeddings + + # set phi model text embedding + self.text_embeddings_phi = text_embeddings_cond.repeat_interleave(self.phi_n_particle, dim=0) + + def x_augment(self, x: torch.Tensor, img_size: int = 512): + augment_compose = transforms.Compose([ + transforms.RandomPerspective(distortion_scale=0.5, p=0.7), + transforms.RandomCrop(size=(img_size, img_size), pad_if_needed=True, padding_mode='reflect') + ]) + return augment_compose(x) + + def variational_score_distillation(self, + pred_rgb: torch.Tensor, + step: int, + prompt: Union[List, str], + negative_prompt: Union[List, str] = None, + grad_scale: float = 1.0, + enhance_particle: bool = False, + im_size: int = 512, + as_latent: bool = False): + bz = pred_rgb.shape[0] + + # data enhancement for the input particles + pred_rgb = self.x_augment(pred_rgb, im_size) if enhance_particle else pred_rgb + + # interp to 512x512 to be fed into vae. + if as_latent: + latents = F.interpolate(pred_rgb, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + else: + pred_rgb_ = F.interpolate(pred_rgb, (512, 512), mode='bilinear', align_corners=False) + # encode image into latents with vae, requires grad! 
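+            # encode the particles one at a time and re-assemble the batch; the VAE forward
+            # pass stays differentiable so the VSD gradient can flow back to the renders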
+ # latents = self.encode2latent(pred_rgb_) + latent_list = [self.encode2latent(pred_rgb_[i].unsqueeze(0)) for i in range(bz)] + latents = torch.cat(latent_list, dim=0) + latents = latents.to(self.device) + + # random sample n_particle_vsd particles from latents + latents_vsd = latents[torch.randperm(bz)[:self.vsd_n_particle]] + + # encode input prompt + do_classifier_free_guidance = True + self.set_text_embeddings(prompt, negative_prompt, do_classifier_free_guidance) + text_embeddings = self.text_embeddings + + # timestep a.k.a noise level + self.t = self.schedule_timestep(step) + + # predict the noise residual with unet, stop gradient + with torch.no_grad(): + # add noise + noise = torch.randn_like(latents_vsd) + latents_noisy = self.scheduler.add_noise(latents_vsd, noise, self.t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * 2) if do_classifier_free_guidance else latents_noisy + # pretrained noise prediction network + noise_pred_pretrain = self.unet( + latent_model_input, self.t, + encoder_hidden_states=text_embeddings, + cross_attention_kwargs={'scale': 0.0} if self.phi_single else {} + ).sample + + # use conditional text embeddings in phi_model + _, text_embeddings_cond = text_embeddings.chunk(2) + # estimated noise prediction network + noise_pred_est = self.unet_phi( + latents_noisy, self.t, + encoder_hidden_states=text_embeddings_cond, + cross_attention_kwargs=self.lora_cross_attention_kwargs + ).sample + + # get pretrained score + noise_pred_pretrain = self.get_noise_map(noise_pred_pretrain, self.guidance_scale, use_cfg=True) + # get estimated score + noise_pred_est = self.get_noise_map(noise_pred_est, self.guidance_scale_lora, use_cfg=False) + + # w(t), sigma_t^2 + w = (1 - self.alphas[self.t]) + grad = grad_scale * w * (noise_pred_pretrain - noise_pred_est.detach()) + grad = torch.nan_to_num(grad) + + # grad clipping for stable training + if self.grad_clip_val is not None and self.grad_clip_val > 0: + grad = grad.clamp(-self.grad_clip_val, self.grad_clip_val) + + # re-parameterization trick: + # d(loss)/d(latents) = latents - target = latents - (latents - grad) = grad + target = (latents_vsd - grad).detach() + loss_vpsd = 0.5 * F.mse_loss(latents_vsd, target, reduction="sum") + + return loss_vpsd, grad.norm(), latents, self.t diff --git a/pytorch_svgrender/painter/svgdreamer/__init__.py b/pytorch_svgrender/painter/svgdreamer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..247aca5c372cf9d7c9789d08539fb9115d36991d --- /dev/null +++ b/pytorch_svgrender/painter/svgdreamer/__init__.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: __init__.py +# Copyright (c) 2024, XiMing Xing. +# License: MPL-2.0 License + +from .painter_params import Painter, PainterOptimizer +from .loss import channel_saturation_penalty_loss +from .VPSD_pipeline import VectorizedParticleSDSPipeline diff --git a/pytorch_svgrender/painter/svgdreamer/loss.py b/pytorch_svgrender/painter/svgdreamer/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..ba6d62b5c01896cb4a84db063cc45e402ea9e32f --- /dev/null +++ b/pytorch_svgrender/painter/svgdreamer/loss.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: +import torch + + +def channel_saturation_penalty_loss(x: torch.Tensor): + assert x.shape[1] == 3 + r_channel = x[:, 0, :, :] + g_channel = x[:, 1, :, :] + b_channel = x[:, 2, :, :] + channel_accumulate = torch.pow(r_channel, 2) + torch.pow(g_channel, 2) + torch.pow(b_channel, 2) + return channel_accumulate.mean() / 3 diff --git a/pytorch_svgrender/painter/svgdreamer/painter_params.py b/pytorch_svgrender/painter/svgdreamer/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..069a4d6f94a1ff2bde234b6ccfe3433365882ebd --- /dev/null +++ b/pytorch_svgrender/painter/svgdreamer/painter_params.py @@ -0,0 +1,808 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: +import math +import copy +import random +import pathlib +from typing import Dict + +from shapely.geometry.polygon import Polygon +import omegaconf +import cv2 +import numpy as np +import pydiffvg +import torch +from torch.optim.lr_scheduler import LambdaLR + +from pytorch_svgrender.diffvg_warp import DiffVGState +from pytorch_svgrender.libs.solver.optim import get_optimizer + + +class Painter(DiffVGState): + + def __init__( + self, + diffvg_cfg: omegaconf.DictConfig, + style: str, + num_segments: int, + segment_init: str, + radius: int = 20, + canvas_size: int = 600, + n_grid: int = 32, + trainable_bg: bool = False, + stroke_width: int = 3, + path_svg=None, + device=None, + ): + super().__init__(device, print_timing=diffvg_cfg.print_timing, + canvas_width=canvas_size, canvas_height=canvas_size) + + self.style = style + + self.num_segments = num_segments + self.segment_init = segment_init + self.radius = radius + + """pixelart params""" + self.n_grid = n_grid # divide the canvas into n grids + self.pixel_per_grid = self.canvas_width // self.n_grid + """sketch params""" + self.stroke_width = stroke_width + """iconography params""" + self.color_ref = None + + self.path_svg = path_svg + self.optimize_flag = [] + + self.strokes_counter = 0 # counts the number of calls to "get_path" + + # Background color + self.para_bg = torch.tensor([1., 1., 1.], requires_grad=trainable_bg, device=self.device) + + self.target_img = None + self.pos_init_method = None + + def component_wise_path_init(self, gt, pred, init_type: str = 'sparse'): + # set target image + self.target_img = gt + + if init_type == 'random': + self.pos_init_method = RandomCoordInit(self.canvas_height, self.canvas_width) + elif init_type == 'sparse': + # when initialized for the first time, the render result is None + if pred is None: + pred = self.para_bg.view(1, -1, 1, 1).repeat(1, 1, self.canvas_height, self.canvas_width) + # then pred is the render result + self.pos_init_method = SparseCoordInit(pred, gt) + elif init_type == 'naive': + if pred is None: + pred = self.para_bg.view(1, -1, 1, 1).repeat(1, 1, self.canvas_height, self.canvas_width) + self.pos_init_method = NaiveCoordInit(pred, gt) + else: + raise NotImplementedError(f"'{init_type}' is not support.") + + def init_image(self, stage=0, num_paths=0): + self.cur_shapes, self.cur_shape_groups = [], [] + + # or init svg by pydiffvg + if self.style in ['pixelart', 'low-poly']: # update path definition + num_paths = self.n_grid + + if stage > 0: + # Noting: if multi stages training than add new strokes on existing ones + # don't optimize on previous strokes + self.optimize_flag = [False for i in range(len(self.shapes))] + for i in range(num_paths): + if self.style == 'iconography': + path = 
self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + fill_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + fill_color_init[-1] = 1.0 + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([self.strokes_counter - 1]), + fill_color=fill_color_init, + stroke_color=None + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + self.optimize_flag.append(True) + + elif self.style in ['pixelart', 'low-poly']: + for j in range(num_paths): + path = self.get_path(coord=[i, j]) + self.shapes.append(path) + self.cur_shapes.append(path) + + fill_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + fill_color_init[-1] = 1.0 + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.LongTensor([i * num_paths + j]), + fill_color=fill_color_init, + stroke_color=None, + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + self.optimize_flag.append(True) + + elif self.style in ['ink', 'sketch']: + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + stroke_color_init = [0.0, 0.0, 0.0] + [random.random()] + stroke_color_init = torch.FloatTensor(stroke_color_init) + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color_init + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + elif self.style == 'painting': + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + wref, href = self.color_ref + wref = max(0, min(int(wref), self.canvas_width - 1)) + href = max(0, min(int(href), self.canvas_height - 1)) + stroke_color_init = list(self.target_img[0, :, href, wref]) + [1.] + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=torch.FloatTensor(stroke_color_init) + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + else: + num_paths_exists = 0 + if self.path_svg is not None and pathlib.Path(self.path_svg).exists(): + print(f"-> init svg from `{self.path_svg}` ...") + + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups = self.load_svg(self.path_svg) + # if you want to add more strokes to existing ones and optimize on all of them + num_paths_exists = len(self.shapes) + + self.cur_shapes = self.shapes + self.cur_shape_groups = self.shape_groups + + for i in range(num_paths_exists, num_paths): + if self.style == 'iconography': + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + wref, href = self.color_ref + wref = max(0, min(int(wref), self.canvas_width - 1)) + href = max(0, min(int(href), self.canvas_height - 1)) + fill_color_init = list(self.target_img[0, :, href, wref]) + [1.] 
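+                    # the initial fill colour is read from the target image at the path's
+                    # init position (self.color_ref, clamped to the canvas) with alpha set
+                    # to 1, so a new shape starts out close to the local colour of the
+                    # region it is meant to cover.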
+ path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([self.strokes_counter - 1]), + fill_color=torch.FloatTensor(fill_color_init), + stroke_color=None + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + elif self.style in ['pixelart', 'low-poly']: + for j in range(num_paths): + path = self.get_path(coord=[i, j]) + self.shapes.append(path) + self.cur_shapes.append(path) + + fill_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + fill_color_init[-1] = 1.0 + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.LongTensor([i * num_paths + j]), + fill_color=fill_color_init, + stroke_color=None, + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + elif self.style in ['sketch', 'ink']: + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + stroke_color_init = [0.0, 0.0, 0.0] + [random.random()] + stroke_color_init = torch.FloatTensor(stroke_color_init) + + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color_init + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + elif self.style in ['painting']: + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + if self.color_ref is None: + stroke_color_val = np.random.uniform(size=[4]) + stroke_color_val[-1] = 1.0 + stroke_color_init = torch.FloatTensor(stroke_color_val) + else: + wref, href = self.color_ref + wref = max(0, min(int(wref), self.canvas_width - 1)) + href = max(0, min(int(href), self.canvas_height - 1)) + stroke_color_init = list(self.target_img[0, :, href, wref]) + [1.] + stroke_color_init = torch.FloatTensor(stroke_color_init) + + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color_init + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + self.optimize_flag = [True for i in range(len(self.shapes))] + + img = self.get_image() + return img + + def get_image(self, step: int = 0): + img = self.render_warp(step) + img = img[:, :, 3:4] * img[:, :, :3] + self.para_bg * (1 - img[:, :, 3:4]) + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_path(self, coord=None): + num_segments = self.num_segments + + points = [] + if self.style == 'iconography': + # init segment + if self.segment_init == 'circle': + num_control_points = [2] * num_segments + radius = self.radius if self.radius is not None else np.random.uniform(0.5, 1) + if self.pos_init_method is not None: + center = self.pos_init_method() + else: + center = (random.random(), random.random()) + bias = center + self.color_ref = copy.deepcopy(bias) + + avg_degree = 360 / (num_segments * 3) + for i in range(0, num_segments * 3): + point = ( + np.cos(np.deg2rad(i * avg_degree)), np.sin(np.deg2rad(i * avg_degree)) + ) + points.append(point) + + points = torch.FloatTensor(points) * radius + torch.FloatTensor(bias).unsqueeze(dim=0) + elif self.segment_init == 'random': + num_control_points = [2] * num_segments + p0 = self.pos_init_method() + self.color_ref = copy.deepcopy(p0) + points.append(p0) + + for j in range(num_segments): + radius = self.radius + p1 = (p0[0] + radius * np.random.uniform(-0.5, 0.5), + p0[1] + radius * np.random.uniform(-0.5, 0.5)) + p2 = (p1[0] + radius * np.random.uniform(-0.5, 0.5), + p1[1] + radius * 
np.random.uniform(-0.5, 0.5)) + p3 = (p2[0] + radius * np.random.uniform(-0.5, 0.5), + p2[1] + radius * np.random.uniform(-0.5, 0.5)) + points.append(p1) + points.append(p2) + if j < num_segments - 1: + points.append(p3) + p0 = p3 + points = torch.FloatTensor(points) + else: + raise NotImplementedError(f"{self.segment_init} is not exists.") + + path = pydiffvg.Path( + num_control_points=torch.LongTensor(num_control_points), + points=points, + stroke_width=torch.tensor(0.0), + is_closed=True + ) + + elif self.style in ['sketch', 'painting', 'ink']: + num_control_points = torch.zeros(num_segments, dtype=torch.long) + 2 + points = [] + p0 = [random.random(), random.random()] + points.append(p0) + + # select color by first point coordinate + color_ref = copy.deepcopy(p0) + color_ref[0] *= self.canvas_width + color_ref[1] *= self.canvas_height + self.color_ref = color_ref + + for j in range(num_segments): + radius = 0.1 + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5)) + p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5)) + points.append(p1) + points.append(p2) + points.append(p3) + p0 = p3 + points = torch.tensor(points).to(self.device) + points[:, 0] *= self.canvas_width + points[:, 1] *= self.canvas_height + + path = pydiffvg.Path(num_control_points=torch.LongTensor(num_control_points), + points=points, + stroke_width=torch.tensor(float(self.stroke_width)), + is_closed=False) + + elif self.style in ['pixelart', 'low-poly']: + x = coord[0] * self.pixel_per_grid + y = coord[1] * self.pixel_per_grid + points = torch.FloatTensor([ + [x, y], + [x + self.pixel_per_grid, y], + [x + self.pixel_per_grid, y + self.pixel_per_grid], + [x, y + self.pixel_per_grid] + ]).to(self.device) + path = pydiffvg.Polygon(points=points, + stroke_width=torch.tensor(0.0), + is_closed=True) + + self.strokes_counter += 1 + return path + + def clip_curve_shape(self): + if self.style in ['sketch', 'ink']: + for group in self.shape_groups: + group.stroke_color.data[:3].clamp_(0., 0.) # to force black stroke + group.stroke_color.data[-1].clamp_(0., 1.) # clip alpha + else: + for group in self.shape_groups: + if group.stroke_color is not None: + group.stroke_color.data.clamp_(0.0, 1.0) # clip rgba + if group.fill_color is not None: + group.fill_color.data.clamp_(0.0, 1.0) # clip rgba + + def reinitialize_paths(self, + reinit_path: bool = False, + opacity_threshold: float = None, + area_threshold: float = None, + fpath: pathlib.Path = None): + """ + reinitialize paths, also known as 'Reinitializing paths' in VectorFusion paper. + + Args: + reinit_path: whether to reinitialize paths or not. + opacity_threshold: Threshold of opacity. + area_threshold: Threshold of the closed polygon area. + fpath: The path to save the reinitialized SVG. 
+ """ + if not reinit_path: + return + if self.style not in ['iconography', 'low-poly', 'painting']: + return + + def get_keys_below_threshold(my_dict, threshold): + keys_below_threshold = [key for key, value in my_dict.items() if value < threshold] + return keys_below_threshold + + select_path_ids_by_opc = [] + select_path_ids_by_area = [] + if self.style in ['iconography', 'low-poly']: + # re-init by opacity_threshold + if opacity_threshold != 0 and opacity_threshold is not None: + opacity_record_ = {group.shape_ids.item(): group.fill_color[-1].item() + for group in self.cur_shape_groups} + # print("-> opacity_record: ", opacity_record_) + print("-> opacity_record: ", [f"{k}: {v:.3f}" for k, v in opacity_record_.items()]) + select_path_ids_by_opc = get_keys_below_threshold(opacity_record_, opacity_threshold) + print("select_path_ids_by_opc: ", select_path_ids_by_opc) + + # remove path by area_threshold + if area_threshold != 0 and area_threshold is not None: + area_records = [Polygon(shape.points.detach().cpu().numpy()).area for shape in self.cur_shapes] + # print("-> area_records: ", area_records) + print("-> area_records: ", ['%.2f' % i for i in area_records]) + for i, shape in enumerate(self.cur_shapes): + points_ = shape.points.detach().cpu().numpy() + if Polygon(points_).area < area_threshold: + select_path_ids_by_area.append(shape.id) + print("select_path_ids_by_area: ", select_path_ids_by_area) + + elif self.style in ['painting']: + # re-init by opacity_threshold + if opacity_threshold != 0 and opacity_threshold is not None: + opacity_record_ = {group.shape_ids.item(): group.stroke_color[-1].item() + for group in self.cur_shape_groups} + # print("-> opacity_record: ", opacity_record_) + print("-> opacity_record: ", [f"{k}: {v:.3f}" for k, v in opacity_record_.items()]) + select_path_ids_by_opc = get_keys_below_threshold(opacity_record_, opacity_threshold) + print("select_path_ids_by_opc: ", select_path_ids_by_opc) + + # re-init paths + reinit_union = list(set(select_path_ids_by_opc + select_path_ids_by_area)) + if len(reinit_union) > 0: + for i, path in enumerate(self.cur_shapes): + if path.id in reinit_union: + coord = [i, i] if self.style == 'low-poly' else None + self.cur_shapes[i] = self.get_path(coord=coord) + for i, group in enumerate(self.cur_shape_groups): + shp_ids = group.shape_ids.cpu().numpy().tolist() + if set(shp_ids).issubset(reinit_union): + if self.style in ['iconography', 'low-poly']: + fill_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + fill_color_init[-1] = 1.0 + self.cur_shape_groups[i] = pydiffvg.ShapeGroup( + shape_ids=torch.tensor(list(shp_ids)), + fill_color=fill_color_init, + stroke_color=None) + elif self.style in ['painting']: + stroke_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + stroke_color_init[-1] = 1.0 + self.cur_shape_groups[i] = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color_init + ) + # save reinit svg + self.pretty_save_svg(fpath) + + print("-" * 40) + + def calc_distance_weight(self, loss_weight_keep): + shapes_forsdf = copy.deepcopy(self.cur_shapes) + shape_groups_forsdf = copy.deepcopy(self.cur_shape_groups) + for si in shapes_forsdf: + si.stroke_width = torch.FloatTensor([0]).to(self.device) + for sg_idx, sgi in enumerate(shape_groups_forsdf): + sgi.fill_color = torch.FloatTensor([1, 1, 1, 1]).to(self.device) + sgi.shape_ids = torch.LongTensor([sg_idx]).to(self.device) + + sargs_forsdf = pydiffvg.RenderFunction.serialize_scene( + 
self.canvas_width, self.canvas_height, shapes_forsdf, shape_groups_forsdf + ) + _render = pydiffvg.RenderFunction.apply + with torch.no_grad(): + im_forsdf = _render(self.canvas_width, # width + self.canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *sargs_forsdf) + + # use alpha channel is a trick to get 0-1 image + im_forsdf = (im_forsdf[:, :, 3]).detach().cpu().numpy() + loss_weight = get_sdf(im_forsdf, normalize='to1') + loss_weight += loss_weight_keep + loss_weight = np.clip(loss_weight, 0, 1) + loss_weight = torch.FloatTensor(loss_weight).to(self.device) + return loss_weight + + def set_point_parameters(self, id_delta=0): + self.point_vars = [] + for i, path in enumerate(self.cur_shapes): + path.id = i + id_delta # set point id + path.points.requires_grad = True + self.point_vars.append(path.points) + + def get_point_parameters(self): + return self.point_vars + + def set_color_parameters(self): + self.color_vars = [] + for i, group in enumerate(self.cur_shape_groups): + if group.fill_color is not None: + group.fill_color.requires_grad = True + self.color_vars.append(group.fill_color) + if group.stroke_color is not None: + group.stroke_color.requires_grad = True + self.color_vars.append(group.stroke_color) + + def get_color_parameters(self): + return self.color_vars + + def set_width_parameters(self): + # stroke`s width optimization + self.width_vars = [] + for i, path in enumerate(self.shapes): + path.stroke_width.requires_grad = True + self.width_vars.append(path.stroke_width) + + def get_width_parameters(self): + return self.width_vars + + def pretty_save_svg(self, filename, width=None, height=None, shapes=None, shape_groups=None): + width = self.canvas_width if width is None else width + height = self.canvas_height if height is None else height + shapes = self.shapes if shapes is None else shapes + shape_groups = self.shape_groups if shape_groups is None else shape_groups + + self.save_svg(filename, width, height, shapes, shape_groups, use_gamma=False, background=None) + + def load_svg(self, path_svg): + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(path_svg) + return canvas_width, canvas_height, shapes, shape_groups + + +def get_sdf(phi, **kwargs): + import skfmm # local import + + phi = (phi - 0.5) * 2 + if (phi.max() <= 0) or (phi.min() >= 0): + return np.zeros(phi.shape).astype(np.float32) + sd = skfmm.distance(phi, dx=1) + + flip_negative = kwargs.get('flip_negative', True) + if flip_negative: + sd = np.abs(sd) + + truncate = kwargs.get('truncate', 10) + sd = np.clip(sd, -truncate, truncate) + # print(f"max sd value is: {sd.max()}") + + zero2max = kwargs.get('zero2max', True) + if zero2max and flip_negative: + sd = sd.max() - sd + elif zero2max: + raise ValueError + + normalize = kwargs.get('normalize', 'sum') + if normalize == 'sum': + sd /= sd.sum() + elif normalize == 'to1': + sd /= sd.max() + return sd + + +class SparseCoordInit: + + def __init__(self, pred, gt, format='[bs x c x 2D]', quantile_interval=200, nodiff_thres=0.1): + if torch.is_tensor(pred): + pred = pred.detach().cpu().numpy() + if torch.is_tensor(gt): + gt = gt.detach().cpu().numpy() + + if format == '[bs x c x 2D]': + self.map = ((pred[0] - gt[0]) ** 2).sum(0) + self.reference_gt = copy.deepcopy(np.transpose(gt[0], (1, 2, 0))) + elif format == ['[2D x c]']: + self.map = (np.abs(pred - gt)).sum(-1) + self.reference_gt = copy.deepcopy(gt[0]) + else: + raise ValueError + + # OptionA: Zero too small errors to avoid the error too small deadloop + 
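+        # overview: the per-pixel (pred - gt)^2 error map is quantised into discrete
+        # levels, the lowest level (already well-fitted pixels) is dropped, and __call__
+        # repeatedly returns a point near the centre of the largest connected component
+        # of the most populated remaining level, so new paths are seeded inside large
+        # contiguous regions of reconstruction error.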
self.map[self.map < nodiff_thres] = 0 + quantile_interval = np.linspace(0., 1., quantile_interval) + quantized_interval = np.quantile(self.map, quantile_interval) + # remove redundant + quantized_interval = np.unique(quantized_interval) + quantized_interval = sorted(quantized_interval[1:-1]) + self.map = np.digitize(self.map, quantized_interval, right=False) + self.map = np.clip(self.map, 0, 255).astype(np.uint8) + self.idcnt = {} + for idi in sorted(np.unique(self.map)): + self.idcnt[idi] = (self.map == idi).sum() + # remove smallest one to remove the correct region + self.idcnt.pop(min(self.idcnt.keys())) + + def __call__(self): + if len(self.idcnt) == 0: + h, w = self.map.shape + return [np.random.uniform(0, 1) * w, np.random.uniform(0, 1) * h] + + target_id = max(self.idcnt, key=self.idcnt.get) + _, component, cstats, ccenter = cv2.connectedComponentsWithStats( + (self.map == target_id).astype(np.uint8), + connectivity=4 + ) + # remove cid = 0, it is the invalid area + csize = [ci[-1] for ci in cstats[1:]] + target_cid = csize.index(max(csize)) + 1 + center = ccenter[target_cid][::-1] + coord = np.stack(np.where(component == target_cid)).T + dist = np.linalg.norm(coord - center, axis=1) + target_coord_id = np.argmin(dist) + coord_h, coord_w = coord[target_coord_id] + + # replace_sampling + self.idcnt[target_id] -= max(csize) + if self.idcnt[target_id] == 0: + self.idcnt.pop(target_id) + self.map[component == target_cid] = 0 + return [coord_w, coord_h] + + +class RandomCoordInit: + def __init__(self, canvas_width, canvas_height): + self.canvas_width, self.canvas_height = canvas_width, canvas_height + + def __call__(self): + w, h = self.canvas_width, self.canvas_height + return [np.random.uniform(0, 1) * w, np.random.uniform(0, 1) * h] + + +class NaiveCoordInit: + def __init__(self, pred, gt, format='[bs x c x 2D]', replace_sampling=True): + if isinstance(pred, torch.Tensor): + pred = pred.detach().cpu().numpy() + if isinstance(gt, torch.Tensor): + gt = gt.detach().cpu().numpy() + + if format == '[bs x c x 2D]': + self.map = ((pred[0] - gt[0]) ** 2).sum(0) + elif format == ['[2D x c]']: + self.map = ((pred - gt) ** 2).sum(-1) + else: + raise ValueError + self.replace_sampling = replace_sampling + + def __call__(self): + coord = np.where(self.map == self.map.max()) + coord_h, coord_w = coord[0][0], coord[1][0] + if self.replace_sampling: + self.map[coord_h, coord_w] = -1 + return [coord_w, coord_h] + + +class PainterOptimizer: + + def __init__(self, + renderer: Painter, + style: str, + num_iter: int, + lr_config: omegaconf.DictConfig, + trainable_bg: bool = False): + self.renderer = renderer + self.num_iter = num_iter + self.trainable_bg = trainable_bg + self.lr_config = lr_config + + # set optimized params via style + self.optim_point, self.optim_color, self.optim_width = { + "iconography": (True, True, False), + "pixelart": (False, True, False), + "low-poly": (True, True, False), + "sketch": (True, True, False), + "ink": (True, True, True), + "painting": (True, True, True) + }.get(style, (False, False, False)) + self.optim_bg = trainable_bg + + # set lr schedule + schedule_cfg = lr_config.schedule + if schedule_cfg.name == 'linear': + self.lr_lambda = LinearDecayWithKeepLRLambda(init_lr=lr_config.point, + keep_ratio=schedule_cfg.keep_ratio, + decay_every=self.num_iter, + decay_ratio=schedule_cfg.decay_ratio) + elif schedule_cfg.name == 'cosine': + self.lr_lambda = CosineWithWarmupLRLambda(num_steps=self.num_iter, + warmup_steps=schedule_cfg.warmup_steps, + 
warmup_start_lr=schedule_cfg.warmup_start_lr, + warmup_end_lr=schedule_cfg.warmup_end_lr, + cosine_end_lr=schedule_cfg.cosine_end_lr) + else: + print(f"{schedule_cfg.name} is not support.") + self.lr_lambda = None + + self.point_optimizer = None + self.color_optimizer = None + self.width_optimizer = None + self.bg_optimizer = None + self.point_scheduler = None + + def init_optimizers(self, pid_delta: int = 0): + # optimizer + optim_cfg = self.lr_config.optim + optim_name = optim_cfg.name + + params = {} + if self.optim_point: + self.renderer.set_point_parameters(pid_delta) + params['point'] = self.renderer.get_point_parameters() + self.point_optimizer = get_optimizer(optim_name, params['point'], self.lr_config.point, optim_cfg) + + if self.optim_color: + self.renderer.set_color_parameters() + params['color'] = self.renderer.get_color_parameters() + self.color_optimizer = get_optimizer(optim_name, params['color'], self.lr_config.color, optim_cfg) + + if self.optim_width: + self.renderer.set_width_parameters() + params['width'] = self.renderer.get_width_parameters() + if len(params['width']) > 0: + self.width_optimizer = get_optimizer(optim_name, params['width'], self.lr_config.width, optim_cfg) + + if self.optim_bg: + self.renderer.para_bg.requires_grad = True + self.bg_optimizer = get_optimizer(optim_name, self.renderer.para_bg, self.lr_config.bg, optim_cfg) + + # lr schedule + if self.lr_lambda is not None and self.optim_point: + self.point_scheduler = LambdaLR(self.point_optimizer, lr_lambda=self.lr_lambda, last_epoch=-1) + + def update_lr(self): + if self.point_scheduler is not None: + self.point_scheduler.step() + + def zero_grad_(self): + if self.point_optimizer is not None: + self.point_optimizer.zero_grad() + if self.color_optimizer is not None: + self.color_optimizer.zero_grad() + if self.width_optimizer is not None: + self.width_optimizer.zero_grad() + if self.bg_optimizer is not None: + self.bg_optimizer.zero_grad() + + def step_(self): + if self.point_optimizer is not None: + self.point_optimizer.step() + if self.color_optimizer is not None: + self.color_optimizer.step() + if self.width_optimizer is not None: + self.width_optimizer.step() + if self.bg_optimizer is not None: + self.bg_optimizer.step() + + def get_lr(self) -> Dict: + lr = {} + if self.point_optimizer is not None: + lr['pnt'] = self.point_optimizer.param_groups[0]['lr'] + if self.color_optimizer is not None: + lr['clr'] = self.color_optimizer.param_groups[0]['lr'] + if self.width_optimizer is not None: + lr['wd'] = self.width_optimizer.param_groups[0]['lr'] + if self.bg_optimizer is not None: + lr['bg'] = self.bg_optimizer.param_groups[0]['lr'] + return lr + + +class LinearDecayWithKeepLRLambda: + """apply in LIVE stage""" + + def __init__(self, init_lr, keep_ratio, decay_every, decay_ratio): + self.init_lr = init_lr + self.keep_ratio = keep_ratio + self.decay_every = decay_every + self.decay_ratio = decay_ratio + + def __call__(self, n): + if n < self.keep_ratio * self.decay_every: + return self.init_lr + + decay_time = n // self.decay_every + decay_step = n % self.decay_every + lr_s = self.decay_ratio ** decay_time + lr_e = self.decay_ratio ** (decay_time + 1) + r = decay_step / self.decay_every + lr = lr_s * (1 - r) + lr_e * r + return lr + + +class CosineWithWarmupLRLambda: + """apply in fine-tuning stage""" + + def __init__(self, num_steps, warmup_steps, warmup_start_lr, warmup_end_lr, cosine_end_lr): + self.n_steps = num_steps + self.n_warmup = warmup_steps + self.warmup_start_lr = warmup_start_lr + 
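+        # schedule shape: the LR rises linearly from warmup_start_lr to warmup_end_lr over
+        # the first `warmup_steps` steps, then follows a half-cosine from warmup_end_lr
+        # down to cosine_end_lr over the remaining steps (see __call__ below).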
self.warmup_end_lr = warmup_end_lr + self.cosine_end_lr = cosine_end_lr + + def __call__(self, n): + if n < self.n_warmup: + # linearly warmup + return self.warmup_start_lr + (n / self.n_warmup) * (self.warmup_end_lr - self.warmup_start_lr) + else: + # cosine decayed schedule + return self.cosine_end_lr + 0.5 * (self.warmup_end_lr - self.cosine_end_lr) * ( + 1 + math.cos(math.pi * (n - self.n_warmup) / (self.n_steps - self.n_warmup))) diff --git a/pytorch_svgrender/painter/vectorfusion/LSDS_SDXL_pipeline.py b/pytorch_svgrender/painter/vectorfusion/LSDS_SDXL_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..6e281262f8519f67b568f7d22677544e152859e6 --- /dev/null +++ b/pytorch_svgrender/painter/vectorfusion/LSDS_SDXL_pipeline.py @@ -0,0 +1,434 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from typing import Callable, List, Optional, Union, Tuple + +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_bwd, custom_fwd +from torchvision import transforms +from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput +from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipeline + + +class LSDSSDXLPipeline(StableDiffusionXLPipeline): + r""" + Pipeline for text-to-image generation using Stable Diffusion XL. + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
+ """ + _optional_components = ["safety_checker", "feature_extractor"] + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + prompt_2: Optional[Union[str, List[str]]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + denoising_end: Optional[float] = None, + guidance_scale: float = 5.0, + negative_prompt: Optional[Union[str, List[str]]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + used in both text-encoders + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + denoising_end (`float`, *optional*): + When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be + completed before it is intentionally prematurely terminated. As a result, the returned sample will + still retain a substantial amount of noise as determined by the discrete timesteps selected by the + scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a + "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image + Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) + guidance_scale (`float`, *optional*, defaults to 5.0): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. 
If not defined, `negative_prompt` is used in both text-encoders + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead + of a plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. + `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as + explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): + `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position + `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting + `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of + [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): + For most cases, `target_size` should be set to the desired height and width of the generated image. If + not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in + section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). + + Examples: + + Returns: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a + `tuple`. When returning a tuple, the first element is a list with the generated images. + """ + + # 0. 
Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, prompt_2, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + ( + text_embeddings, + negative_text_embeddings, + pooled_text_embeddings, + negative_pooled_text_embeddings, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + try: + num_channels_latents = self.unet.config.in_channels + except Exception or Warning: + num_channels_latents = self.unet.in_channels + + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + text_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. inherit TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Prepare added time ids & embeddings + add_text_embeddings = pooled_text_embeddings + add_time_ids = self._get_add_time_ids( + original_size, crops_coords_top_left, target_size, dtype=text_embeddings.dtype + ) + + if do_classifier_free_guidance: + text_embeddings = torch.cat([negative_text_embeddings, text_embeddings], dim=0) + add_text_embeddings = torch.cat([negative_pooled_text_embeddings, add_text_embeddings], dim=0) + add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) + + text_embeddings = text_embeddings.to(device) + add_text_embeddings = add_text_embeddings.to(device) + add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) + + # 8. 
Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + + # 8.1 Apply denoising_end + if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1: + discrete_timestep_cutoff = int( + round( + self.scheduler.config.num_train_timesteps + - (denoising_end * self.scheduler.config.num_train_timesteps) + ) + ) + num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) + timesteps = timesteps[:num_inference_steps] + + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeddings, "time_ids": add_time_ids} + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=text_embeddings, + added_cond_kwargs=added_cond_kwargs + ).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # 9. Post-processing + + # The decode_latents method is deprecated and has been removed in sdxl + # image = self.decode_latents(latents) + + # make sure the VAE is in float32 mode, as it overflows in float16 + if self.vae.dtype == torch.float16 and self.vae.config.force_upcast: + self.upcast_vae() + latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) + + if not output_type == "latent": + image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + else: + image = latents + return StableDiffusionXLPipelineOutput(images=image) + + # apply watermark if available + if self.watermark is not None: + image = self.watermark.apply_watermark(image) + + image = self.image_processor.postprocess(image, output_type=output_type) + + if not return_dict: + return (image,) + + return StableDiffusionXLPipelineOutput(images=image) + + def encode_(self, images): + images = (2 * images - 1).clamp(-1.0, 1.0) # images: [B, 3, H, W] + + # encode images + latents = self.vae.encode(images).latent_dist.sample() + latents = self.vae.config.scaling_factor * latents + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + + return latents + + def x_augment(self, x: torch.Tensor, img_size: int = 1024): + augment_compose = transforms.Compose([ + transforms.RandomPerspective(distortion_scale=0.5, p=0.7), + transforms.RandomCrop(size=(img_size, img_size), pad_if_needed=True, padding_mode='reflect') + ]) + return augment_compose(x) + + def score_distillation_sampling(self, + pred_rgb: torch.Tensor, + im_size: int, + prompt: Union[List, str], + prompt_2: Optional[Union[List, str]] = None, + height: Optional[int] = None, + width: Optional[int] = None, + negative_prompt: Union[List, 
str] = None, + negative_prompt_2: Optional[Union[List, str]] = None, + guidance_scale: float = 100, + as_latent: bool = False, + grad_scale: float = 1, + t_range: Union[List[float], Tuple[float]] = (0.05, 0.95), + original_size: Optional[Tuple[int, int]] = None, + crops_coords_top_left: Tuple[int, int] = (0, 0), + target_size: Optional[Tuple[int, int]] = None): + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + original_size = original_size or (height, width) + target_size = target_size or (height, width) + + batch_size = 1 if isinstance(prompt, str) else len(prompt) + + num_train_timesteps = self.scheduler.config.num_train_timesteps + min_step = int(num_train_timesteps * t_range[0]) + max_step = int(num_train_timesteps * t_range[1]) + alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience + + # input augmentation + pred_rgb_a = self.x_augment(pred_rgb, im_size) + + # interp to im_size x im_size to be fed into vae. + if as_latent: + latents = F.interpolate(pred_rgb_a, (128, 128), mode='bilinear', align_corners=False) * 2 - 1 + else: + # encode image into latents with vae, requires grad! + latents = self.encode_(pred_rgb_a) + + # Encode input prompt + num_images_per_prompt = 1 # the number of images to generate per prompt + do_classifier_free_guidance = guidance_scale > 1.0 + ( + text_embeddings, + negative_text_embeddings, + pooled_text_embeddings, + negative_pooled_text_embeddings, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + device=self.device, + num_images_per_prompt=num_images_per_prompt, + do_classifier_free_guidance=do_classifier_free_guidance, + negative_prompt=negative_prompt, + negative_prompt_2=negative_prompt_2, + ) + + # timestep ~ U(0.05, 0.95) to avoid very high/low noise level + t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device) + + # 7. Prepare added time ids & embeddings + add_text_embeddings = pooled_text_embeddings + add_time_ids = self._get_add_time_ids( + original_size, crops_coords_top_left, target_size, dtype=text_embeddings.dtype + ) + + if do_classifier_free_guidance: + text_embeddings = torch.cat([negative_text_embeddings, text_embeddings], dim=0) + add_text_embeddings = torch.cat([negative_pooled_text_embeddings, add_text_embeddings], dim=0) + add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) + + text_embeddings = text_embeddings.to(self.device) + add_text_embeddings = add_text_embeddings.to(self.device) + add_time_ids = add_time_ids.to(self.device).repeat(batch_size * num_images_per_prompt, 1) + + # predict the noise residual with unet, stop gradient + with torch.no_grad(): + # add noise + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * 2) if do_classifier_free_guidance else latents_noisy + # predict the noise residual + added_cond_kwargs = {"text_embeds": add_text_embeddings, "time_ids": add_time_ids} + noise_pred = self.unet( + latent_model_input, + t, + encoder_hidden_states=text_embeddings, + added_cond_kwargs=added_cond_kwargs + ).sample + + # perform guidance (high scale from paper!) 
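+            # classifier-free guidance below combines the two predictions as
+            #   eps_hat = eps_uncond + guidance_scale * (eps_text - eps_uncond);
+            # score distillation deliberately uses a much larger guidance_scale
+            # (default 100 here) than ordinary sampling, so eps_hat is dominated by
+            # the text-conditioned direction.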
+ if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_pos - noise_pred_uncond) + + # w(t), sigma_t^2 + w = (1 - alphas[t]) + grad = grad_scale * w * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + # since we omitted an item in grad, we need to use the custom function to specify the gradient + loss = SpecifyGradient.apply(latents, grad) + + return loss, grad.mean() + + +class SpecifyGradient(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, input_tensor, gt_grad): + ctx.save_for_backward(gt_grad) + # we return a dummy value 1, which will be scaled by amp's scaler so we get the scale in backward. + return torch.ones([1], device=input_tensor.device, dtype=input_tensor.dtype) + + @staticmethod + @custom_bwd + def backward(ctx, grad_scale): + gt_grad, = ctx.saved_tensors + gt_grad = gt_grad * grad_scale + return gt_grad, None diff --git a/pytorch_svgrender/painter/vectorfusion/LSDS_pipeline.py b/pytorch_svgrender/painter/vectorfusion/LSDS_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..64d68a0bc9e41fa64160937b5294b98d1c86ee25 --- /dev/null +++ b/pytorch_svgrender/painter/vectorfusion/LSDS_pipeline.py @@ -0,0 +1,299 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from typing import Callable, List, Optional, Union, Tuple + +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_bwd, custom_fwd +from torchvision import transforms +from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput +from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline + + +class LSDSPipeline(StableDiffusionPipeline): + r""" + Pipeline for text-to-image generation using Stable Diffusion. + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`CLIPTextModel`]): + Frozen text-encoder. Stable Diffusion uses the text portion of + [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically + the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. + tokenizer (`CLIPTokenizer`): + Tokenizer of class + [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of + [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. + safety_checker ([`StableDiffusionSafetyChecker`]): + Classification module that estimates whether generated images could be considered offensive or harmful. + Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. + feature_extractor ([`CLIPFeatureExtractor`]): + Model that extracts features from generated images to be used as inputs for the `safety_checker`. 
+ """ + _optional_components = ["safety_checker", "feature_extractor"] + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]], + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 50, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, + callback_steps: Optional[int] = 1, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored + if `guidance_scale` is less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. 
+ callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. + + Returns: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: + [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. + When returning a tuple, the first element is a list with the generated images, and the second element is a + list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" + (nsfw) content, according to the `safety_checker`. + """ + + # 0. Default height and width to unet + height = height or self.unet.config.sample_size * self.vae_scale_factor + width = width or self.unet.config.sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs(prompt, height, width, callback_steps) + + # 2. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + device = self._execution_device + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. + do_classifier_free_guidance = guidance_scale > 1.0 + + # 3. Encode input prompt + text_embeddings = self._encode_prompt( + prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + + # 4. Prepare timesteps + self.scheduler.set_timesteps(num_inference_steps, device=device) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + try: + num_channels_latents = self.unet.config.in_channels + except Exception or Warning: + num_channels_latents = self.unet.in_channels + + latents = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + text_embeddings.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. inherit TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # expand the latents if we are doing classifier free guidance + latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + # predict the noise residual + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + if callback is not None and i % callback_steps == 0: + callback(i, t, latents) + + # 8. 
Post-processing + image = self.decode_latents(latents) + + # image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] + # do_denormalize = [True] * image.shape[0] + # image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) + + # 9. Run safety checker + has_nsfw_concept = None + # image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype) + + # 10. Convert to PIL + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image, has_nsfw_concept) + + return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) + + def encode_(self, images): + images = (2 * images - 1).clamp(-1.0, 1.0) # images: [B, 3, H, W] + + # encode images + latents = self.vae.encode(images).latent_dist.sample() + latents = self.vae.config.scaling_factor * latents + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + + return latents + + def x_augment(self, x: torch.Tensor, img_size: int = 512): + augment_compose = transforms.Compose([ + transforms.RandomPerspective(distortion_scale=0.5, p=0.7), + transforms.RandomCrop(size=(img_size, img_size), pad_if_needed=True, padding_mode='reflect') + ]) + return augment_compose(x) + + def score_distillation_sampling(self, + pred_rgb: torch.Tensor, + im_size: int, + prompt: Union[List, str], + negative_prompt: Union[List, str] = None, + guidance_scale: float = 100, + as_latent: bool = False, + grad_scale: float = 1, + t_range: Union[List[float], Tuple[float]] = (0.05, 0.95)): + num_train_timesteps = self.scheduler.config.num_train_timesteps + min_step = int(num_train_timesteps * t_range[0]) + max_step = int(num_train_timesteps * t_range[1]) + alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience + + # input augmentation + pred_rgb_a = self.x_augment(pred_rgb, im_size) + + # the input is intercepted to im_size x im_size and then fed to the vae + if as_latent: + latents = F.interpolate(pred_rgb_a, (64, 64), mode='bilinear', align_corners=False) * 2 - 1 + else: + # encode image into latents with vae, requires grad! + latents = self.encode_(pred_rgb_a) + + # Encode input prompt + num_images_per_prompt = 1 # the number of images to generate per prompt + do_classifier_free_guidance = guidance_scale > 1.0 + text_embeddings = self._encode_prompt( + prompt, self.device, num_images_per_prompt, + do_classifier_free_guidance, + negative_prompt=negative_prompt, + ) + + # timestep ~ U(0.05, 0.95) to avoid very high/low noise level + t = torch.randint(min_step, max_step + 1, [1], dtype=torch.long, device=self.device) + + # predict the noise residual with unet, stop gradient + with torch.no_grad(): + # add noise + noise = torch.randn_like(latents) + latents_noisy = self.scheduler.add_noise(latents, noise, t) + # pred noise + latent_model_input = torch.cat([latents_noisy] * 2) if do_classifier_free_guidance else latents_noisy + noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample + + # perform guidance (high scale from paper!) 
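+            # in the SDS update computed below, w(t) = 1 - alpha_bar_t scales the residual
+            # (eps_pred - eps), and SpecifyGradient injects grad directly as
+            # d(loss)/d(latents): SDS drops the U-Net Jacobian term rather than
+            # backpropagating through the diffusion model.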
+ if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_pos = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_pos - noise_pred_uncond) + + # w(t), sigma_t^2 + w = (1 - alphas[t]) + grad = grad_scale * w * (noise_pred - noise) + grad = torch.nan_to_num(grad) + + # since we omitted an item in grad, we need to use the custom function to specify the gradient + loss = SpecifyGradient.apply(latents, grad) + + return loss, grad.mean() + + +class SpecifyGradient(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, input_tensor, gt_grad): + ctx.save_for_backward(gt_grad) + # we return a dummy value 1, which will be scaled by amp's scaler so we get the scale in backward. + return torch.ones([1], device=input_tensor.device, dtype=input_tensor.dtype) + + @staticmethod + @custom_bwd + def backward(ctx, grad_scale): + gt_grad, = ctx.saved_tensors + gt_grad = gt_grad * grad_scale + return gt_grad, None diff --git a/pytorch_svgrender/painter/vectorfusion/__init__.py b/pytorch_svgrender/painter/vectorfusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5331fc5bdf7da076ce01da159dcf3ca79b94d464 --- /dev/null +++ b/pytorch_svgrender/painter/vectorfusion/__init__.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .LSDS_pipeline import LSDSPipeline +from .LSDS_SDXL_pipeline import LSDSSDXLPipeline +from .painter_params import Painter, PainterOptimizer +from .loss import channel_saturation_penalty_loss \ No newline at end of file diff --git a/pytorch_svgrender/painter/vectorfusion/loss.py b/pytorch_svgrender/painter/vectorfusion/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..ba6d62b5c01896cb4a84db063cc45e402ea9e32f --- /dev/null +++ b/pytorch_svgrender/painter/vectorfusion/loss.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: +import torch + + +def channel_saturation_penalty_loss(x: torch.Tensor): + assert x.shape[1] == 3 + r_channel = x[:, 0, :, :] + g_channel = x[:, 1, :, :] + b_channel = x[:, 2, :, :] + channel_accumulate = torch.pow(r_channel, 2) + torch.pow(g_channel, 2) + torch.pow(b_channel, 2) + return channel_accumulate.mean() / 3 diff --git a/pytorch_svgrender/painter/vectorfusion/painter_params.py b/pytorch_svgrender/painter/vectorfusion/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..069a4d6f94a1ff2bde234b6ccfe3433365882ebd --- /dev/null +++ b/pytorch_svgrender/painter/vectorfusion/painter_params.py @@ -0,0 +1,808 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: +import math +import copy +import random +import pathlib +from typing import Dict + +from shapely.geometry.polygon import Polygon +import omegaconf +import cv2 +import numpy as np +import pydiffvg +import torch +from torch.optim.lr_scheduler import LambdaLR + +from pytorch_svgrender.diffvg_warp import DiffVGState +from pytorch_svgrender.libs.solver.optim import get_optimizer + + +class Painter(DiffVGState): + + def __init__( + self, + diffvg_cfg: omegaconf.DictConfig, + style: str, + num_segments: int, + segment_init: str, + radius: int = 20, + canvas_size: int = 600, + n_grid: int = 32, + trainable_bg: bool = False, + stroke_width: int = 3, + path_svg=None, + device=None, + ): + super().__init__(device, print_timing=diffvg_cfg.print_timing, + canvas_width=canvas_size, canvas_height=canvas_size) + + self.style = style + + self.num_segments = num_segments + self.segment_init = segment_init + self.radius = radius + + """pixelart params""" + self.n_grid = n_grid # divide the canvas into n grids + self.pixel_per_grid = self.canvas_width // self.n_grid + """sketch params""" + self.stroke_width = stroke_width + """iconography params""" + self.color_ref = None + + self.path_svg = path_svg + self.optimize_flag = [] + + self.strokes_counter = 0 # counts the number of calls to "get_path" + + # Background color + self.para_bg = torch.tensor([1., 1., 1.], requires_grad=trainable_bg, device=self.device) + + self.target_img = None + self.pos_init_method = None + + def component_wise_path_init(self, gt, pred, init_type: str = 'sparse'): + # set target image + self.target_img = gt + + if init_type == 'random': + self.pos_init_method = RandomCoordInit(self.canvas_height, self.canvas_width) + elif init_type == 'sparse': + # when initialized for the first time, the render result is None + if pred is None: + pred = self.para_bg.view(1, -1, 1, 1).repeat(1, 1, self.canvas_height, self.canvas_width) + # then pred is the render result + self.pos_init_method = SparseCoordInit(pred, gt) + elif init_type == 'naive': + if pred is None: + pred = self.para_bg.view(1, -1, 1, 1).repeat(1, 1, self.canvas_height, self.canvas_width) + self.pos_init_method = NaiveCoordInit(pred, gt) + else: + raise NotImplementedError(f"'{init_type}' is not support.") + + def init_image(self, stage=0, num_paths=0): + self.cur_shapes, self.cur_shape_groups = [], [] + + # or init svg by pydiffvg + if self.style in ['pixelart', 'low-poly']: # update path definition + num_paths = self.n_grid + + if stage > 0: + # Noting: if multi stages training than add new strokes on existing ones + # don't optimize on previous strokes + self.optimize_flag = [False for i in range(len(self.shapes))] + for i in range(num_paths): + if self.style == 'iconography': + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + fill_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + fill_color_init[-1] = 1.0 + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([self.strokes_counter - 1]), + fill_color=fill_color_init, + stroke_color=None + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + self.optimize_flag.append(True) + + elif self.style in ['pixelart', 'low-poly']: + for j in range(num_paths): + path = self.get_path(coord=[i, j]) + self.shapes.append(path) + self.cur_shapes.append(path) + + fill_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + fill_color_init[-1] = 1.0 + path_group = pydiffvg.ShapeGroup( + 
shape_ids=torch.LongTensor([i * num_paths + j]), + fill_color=fill_color_init, + stroke_color=None, + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + self.optimize_flag.append(True) + + elif self.style in ['ink', 'sketch']: + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + stroke_color_init = [0.0, 0.0, 0.0] + [random.random()] + stroke_color_init = torch.FloatTensor(stroke_color_init) + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color_init + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + elif self.style == 'painting': + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + wref, href = self.color_ref + wref = max(0, min(int(wref), self.canvas_width - 1)) + href = max(0, min(int(href), self.canvas_height - 1)) + stroke_color_init = list(self.target_img[0, :, href, wref]) + [1.] + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=torch.FloatTensor(stroke_color_init) + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + else: + num_paths_exists = 0 + if self.path_svg is not None and pathlib.Path(self.path_svg).exists(): + print(f"-> init svg from `{self.path_svg}` ...") + + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups = self.load_svg(self.path_svg) + # if you want to add more strokes to existing ones and optimize on all of them + num_paths_exists = len(self.shapes) + + self.cur_shapes = self.shapes + self.cur_shape_groups = self.shape_groups + + for i in range(num_paths_exists, num_paths): + if self.style == 'iconography': + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + wref, href = self.color_ref + wref = max(0, min(int(wref), self.canvas_width - 1)) + href = max(0, min(int(href), self.canvas_height - 1)) + fill_color_init = list(self.target_img[0, :, href, wref]) + [1.] 
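+                    # initialize the fill color from the target image pixel at the reference coordinate (alpha set to 1)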
+ path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([self.strokes_counter - 1]), + fill_color=torch.FloatTensor(fill_color_init), + stroke_color=None + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + elif self.style in ['pixelart', 'low-poly']: + for j in range(num_paths): + path = self.get_path(coord=[i, j]) + self.shapes.append(path) + self.cur_shapes.append(path) + + fill_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + fill_color_init[-1] = 1.0 + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.LongTensor([i * num_paths + j]), + fill_color=fill_color_init, + stroke_color=None, + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + elif self.style in ['sketch', 'ink']: + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + stroke_color_init = [0.0, 0.0, 0.0] + [random.random()] + stroke_color_init = torch.FloatTensor(stroke_color_init) + + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color_init + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + elif self.style in ['painting']: + path = self.get_path() + self.shapes.append(path) + self.cur_shapes.append(path) + + if self.color_ref is None: + stroke_color_val = np.random.uniform(size=[4]) + stroke_color_val[-1] = 1.0 + stroke_color_init = torch.FloatTensor(stroke_color_val) + else: + wref, href = self.color_ref + wref = max(0, min(int(wref), self.canvas_width - 1)) + href = max(0, min(int(href), self.canvas_height - 1)) + stroke_color_init = list(self.target_img[0, :, href, wref]) + [1.] + stroke_color_init = torch.FloatTensor(stroke_color_init) + + path_group = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color_init + ) + self.shape_groups.append(path_group) + self.cur_shape_groups.append(path_group) + + self.optimize_flag = [True for i in range(len(self.shapes))] + + img = self.get_image() + return img + + def get_image(self, step: int = 0): + img = self.render_warp(step) + img = img[:, :, 3:4] * img[:, :, :3] + self.para_bg * (1 - img[:, :, 3:4]) + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_path(self, coord=None): + num_segments = self.num_segments + + points = [] + if self.style == 'iconography': + # init segment + if self.segment_init == 'circle': + num_control_points = [2] * num_segments + radius = self.radius if self.radius is not None else np.random.uniform(0.5, 1) + if self.pos_init_method is not None: + center = self.pos_init_method() + else: + center = (random.random(), random.random()) + bias = center + self.color_ref = copy.deepcopy(bias) + + avg_degree = 360 / (num_segments * 3) + for i in range(0, num_segments * 3): + point = ( + np.cos(np.deg2rad(i * avg_degree)), np.sin(np.deg2rad(i * avg_degree)) + ) + points.append(point) + + points = torch.FloatTensor(points) * radius + torch.FloatTensor(bias).unsqueeze(dim=0) + elif self.segment_init == 'random': + num_control_points = [2] * num_segments + p0 = self.pos_init_method() + self.color_ref = copy.deepcopy(p0) + points.append(p0) + + for j in range(num_segments): + radius = self.radius + p1 = (p0[0] + radius * np.random.uniform(-0.5, 0.5), + p0[1] + radius * np.random.uniform(-0.5, 0.5)) + p2 = (p1[0] + radius * np.random.uniform(-0.5, 0.5), + p1[1] + radius * 
np.random.uniform(-0.5, 0.5)) + p3 = (p2[0] + radius * np.random.uniform(-0.5, 0.5), + p2[1] + radius * np.random.uniform(-0.5, 0.5)) + points.append(p1) + points.append(p2) + if j < num_segments - 1: + points.append(p3) + p0 = p3 + points = torch.FloatTensor(points) + else: + raise NotImplementedError(f"{self.segment_init} is not exists.") + + path = pydiffvg.Path( + num_control_points=torch.LongTensor(num_control_points), + points=points, + stroke_width=torch.tensor(0.0), + is_closed=True + ) + + elif self.style in ['sketch', 'painting', 'ink']: + num_control_points = torch.zeros(num_segments, dtype=torch.long) + 2 + points = [] + p0 = [random.random(), random.random()] + points.append(p0) + + # select color by first point coordinate + color_ref = copy.deepcopy(p0) + color_ref[0] *= self.canvas_width + color_ref[1] *= self.canvas_height + self.color_ref = color_ref + + for j in range(num_segments): + radius = 0.1 + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5)) + p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5)) + points.append(p1) + points.append(p2) + points.append(p3) + p0 = p3 + points = torch.tensor(points).to(self.device) + points[:, 0] *= self.canvas_width + points[:, 1] *= self.canvas_height + + path = pydiffvg.Path(num_control_points=torch.LongTensor(num_control_points), + points=points, + stroke_width=torch.tensor(float(self.stroke_width)), + is_closed=False) + + elif self.style in ['pixelart', 'low-poly']: + x = coord[0] * self.pixel_per_grid + y = coord[1] * self.pixel_per_grid + points = torch.FloatTensor([ + [x, y], + [x + self.pixel_per_grid, y], + [x + self.pixel_per_grid, y + self.pixel_per_grid], + [x, y + self.pixel_per_grid] + ]).to(self.device) + path = pydiffvg.Polygon(points=points, + stroke_width=torch.tensor(0.0), + is_closed=True) + + self.strokes_counter += 1 + return path + + def clip_curve_shape(self): + if self.style in ['sketch', 'ink']: + for group in self.shape_groups: + group.stroke_color.data[:3].clamp_(0., 0.) # to force black stroke + group.stroke_color.data[-1].clamp_(0., 1.) # clip alpha + else: + for group in self.shape_groups: + if group.stroke_color is not None: + group.stroke_color.data.clamp_(0.0, 1.0) # clip rgba + if group.fill_color is not None: + group.fill_color.data.clamp_(0.0, 1.0) # clip rgba + + def reinitialize_paths(self, + reinit_path: bool = False, + opacity_threshold: float = None, + area_threshold: float = None, + fpath: pathlib.Path = None): + """ + reinitialize paths, also known as 'Reinitializing paths' in VectorFusion paper. + + Args: + reinit_path: whether to reinitialize paths or not. + opacity_threshold: Threshold of opacity. + area_threshold: Threshold of the closed polygon area. + fpath: The path to save the reinitialized SVG. 
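+
+        A path is re-initialized when its fill/stroke opacity falls below `opacity_threshold`,
+        or (for closed shapes) when its polygon area falls below `area_threshold`; this only
+        applies to the 'iconography', 'low-poly' and 'painting' styles.
+
+        Example (illustrative call only; the threshold values and the `painter` instance are assumptions)::
+
+            painter.reinitialize_paths(reinit_path=True,
+                                       opacity_threshold=0.05,
+                                       area_threshold=10,
+                                       fpath=pathlib.Path("./reinit.svg"))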
+ """ + if not reinit_path: + return + if self.style not in ['iconography', 'low-poly', 'painting']: + return + + def get_keys_below_threshold(my_dict, threshold): + keys_below_threshold = [key for key, value in my_dict.items() if value < threshold] + return keys_below_threshold + + select_path_ids_by_opc = [] + select_path_ids_by_area = [] + if self.style in ['iconography', 'low-poly']: + # re-init by opacity_threshold + if opacity_threshold != 0 and opacity_threshold is not None: + opacity_record_ = {group.shape_ids.item(): group.fill_color[-1].item() + for group in self.cur_shape_groups} + # print("-> opacity_record: ", opacity_record_) + print("-> opacity_record: ", [f"{k}: {v:.3f}" for k, v in opacity_record_.items()]) + select_path_ids_by_opc = get_keys_below_threshold(opacity_record_, opacity_threshold) + print("select_path_ids_by_opc: ", select_path_ids_by_opc) + + # remove path by area_threshold + if area_threshold != 0 and area_threshold is not None: + area_records = [Polygon(shape.points.detach().cpu().numpy()).area for shape in self.cur_shapes] + # print("-> area_records: ", area_records) + print("-> area_records: ", ['%.2f' % i for i in area_records]) + for i, shape in enumerate(self.cur_shapes): + points_ = shape.points.detach().cpu().numpy() + if Polygon(points_).area < area_threshold: + select_path_ids_by_area.append(shape.id) + print("select_path_ids_by_area: ", select_path_ids_by_area) + + elif self.style in ['painting']: + # re-init by opacity_threshold + if opacity_threshold != 0 and opacity_threshold is not None: + opacity_record_ = {group.shape_ids.item(): group.stroke_color[-1].item() + for group in self.cur_shape_groups} + # print("-> opacity_record: ", opacity_record_) + print("-> opacity_record: ", [f"{k}: {v:.3f}" for k, v in opacity_record_.items()]) + select_path_ids_by_opc = get_keys_below_threshold(opacity_record_, opacity_threshold) + print("select_path_ids_by_opc: ", select_path_ids_by_opc) + + # re-init paths + reinit_union = list(set(select_path_ids_by_opc + select_path_ids_by_area)) + if len(reinit_union) > 0: + for i, path in enumerate(self.cur_shapes): + if path.id in reinit_union: + coord = [i, i] if self.style == 'low-poly' else None + self.cur_shapes[i] = self.get_path(coord=coord) + for i, group in enumerate(self.cur_shape_groups): + shp_ids = group.shape_ids.cpu().numpy().tolist() + if set(shp_ids).issubset(reinit_union): + if self.style in ['iconography', 'low-poly']: + fill_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + fill_color_init[-1] = 1.0 + self.cur_shape_groups[i] = pydiffvg.ShapeGroup( + shape_ids=torch.tensor(list(shp_ids)), + fill_color=fill_color_init, + stroke_color=None) + elif self.style in ['painting']: + stroke_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + stroke_color_init[-1] = 1.0 + self.cur_shape_groups[i] = pydiffvg.ShapeGroup( + shape_ids=torch.tensor([len(self.shapes) - 1]), + fill_color=None, + stroke_color=stroke_color_init + ) + # save reinit svg + self.pretty_save_svg(fpath) + + print("-" * 40) + + def calc_distance_weight(self, loss_weight_keep): + shapes_forsdf = copy.deepcopy(self.cur_shapes) + shape_groups_forsdf = copy.deepcopy(self.cur_shape_groups) + for si in shapes_forsdf: + si.stroke_width = torch.FloatTensor([0]).to(self.device) + for sg_idx, sgi in enumerate(shape_groups_forsdf): + sgi.fill_color = torch.FloatTensor([1, 1, 1, 1]).to(self.device) + sgi.shape_ids = torch.LongTensor([sg_idx]).to(self.device) + + sargs_forsdf = pydiffvg.RenderFunction.serialize_scene( + 
self.canvas_width, self.canvas_height, shapes_forsdf, shape_groups_forsdf
+        )
+        _render = pydiffvg.RenderFunction.apply
+        with torch.no_grad():
+            im_forsdf = _render(self.canvas_width,  # width
+                                self.canvas_height,  # height
+                                2,  # num_samples_x
+                                2,  # num_samples_y
+                                0,  # seed
+                                None,
+                                *sargs_forsdf)
+
+        # using the alpha channel is a trick to get a 0-1 image
+        im_forsdf = (im_forsdf[:, :, 3]).detach().cpu().numpy()
+        loss_weight = get_sdf(im_forsdf, normalize='to1')
+        loss_weight += loss_weight_keep
+        loss_weight = np.clip(loss_weight, 0, 1)
+        loss_weight = torch.FloatTensor(loss_weight).to(self.device)
+        return loss_weight
+
+    def set_point_parameters(self, id_delta=0):
+        self.point_vars = []
+        for i, path in enumerate(self.cur_shapes):
+            path.id = i + id_delta  # set point id
+            path.points.requires_grad = True
+            self.point_vars.append(path.points)
+
+    def get_point_parameters(self):
+        return self.point_vars
+
+    def set_color_parameters(self):
+        self.color_vars = []
+        for i, group in enumerate(self.cur_shape_groups):
+            if group.fill_color is not None:
+                group.fill_color.requires_grad = True
+                self.color_vars.append(group.fill_color)
+            if group.stroke_color is not None:
+                group.stroke_color.requires_grad = True
+                self.color_vars.append(group.stroke_color)
+
+    def get_color_parameters(self):
+        return self.color_vars
+
+    def set_width_parameters(self):
+        # stroke width optimization
+        self.width_vars = []
+        for i, path in enumerate(self.shapes):
+            path.stroke_width.requires_grad = True
+            self.width_vars.append(path.stroke_width)
+
+    def get_width_parameters(self):
+        return self.width_vars
+
+    def pretty_save_svg(self, filename, width=None, height=None, shapes=None, shape_groups=None):
+        width = self.canvas_width if width is None else width
+        height = self.canvas_height if height is None else height
+        shapes = self.shapes if shapes is None else shapes
+        shape_groups = self.shape_groups if shape_groups is None else shape_groups
+
+        self.save_svg(filename, width, height, shapes, shape_groups, use_gamma=False, background=None)
+
+    def load_svg(self, path_svg):
+        canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(path_svg)
+        return canvas_width, canvas_height, shapes, shape_groups
+
+
+def get_sdf(phi, **kwargs):
+    import skfmm  # local import
+
+    phi = (phi - 0.5) * 2
+    if (phi.max() <= 0) or (phi.min() >= 0):
+        return np.zeros(phi.shape).astype(np.float32)
+    sd = skfmm.distance(phi, dx=1)
+
+    flip_negative = kwargs.get('flip_negative', True)
+    if flip_negative:
+        sd = np.abs(sd)
+
+    truncate = kwargs.get('truncate', 10)
+    sd = np.clip(sd, -truncate, truncate)
+    # print(f"max sd value is: {sd.max()}")
+
+    zero2max = kwargs.get('zero2max', True)
+    if zero2max and flip_negative:
+        sd = sd.max() - sd
+    elif zero2max:
+        raise ValueError
+
+    normalize = kwargs.get('normalize', 'sum')
+    if normalize == 'sum':
+        sd /= sd.sum()
+    elif normalize == 'to1':
+        sd /= sd.max()
+    return sd
+
+
+class SparseCoordInit:
+
+    def __init__(self, pred, gt, format='[bs x c x 2D]', quantile_interval=200, nodiff_thres=0.1):
+        if torch.is_tensor(pred):
+            pred = pred.detach().cpu().numpy()
+        if torch.is_tensor(gt):
+            gt = gt.detach().cpu().numpy()
+
+        if format == '[bs x c x 2D]':
+            self.map = ((pred[0] - gt[0]) ** 2).sum(0)
+            self.reference_gt = copy.deepcopy(np.transpose(gt[0], (1, 2, 0)))
+        elif format == '[2D x c]':
+            self.map = (np.abs(pred - gt)).sum(-1)
+            self.reference_gt = copy.deepcopy(gt[0])
+        else:
+            raise ValueError
+
+        # Option A: zero out errors that are too small, to avoid getting stuck on negligible differences
+        
self.map[self.map < nodiff_thres] = 0 + quantile_interval = np.linspace(0., 1., quantile_interval) + quantized_interval = np.quantile(self.map, quantile_interval) + # remove redundant + quantized_interval = np.unique(quantized_interval) + quantized_interval = sorted(quantized_interval[1:-1]) + self.map = np.digitize(self.map, quantized_interval, right=False) + self.map = np.clip(self.map, 0, 255).astype(np.uint8) + self.idcnt = {} + for idi in sorted(np.unique(self.map)): + self.idcnt[idi] = (self.map == idi).sum() + # remove smallest one to remove the correct region + self.idcnt.pop(min(self.idcnt.keys())) + + def __call__(self): + if len(self.idcnt) == 0: + h, w = self.map.shape + return [np.random.uniform(0, 1) * w, np.random.uniform(0, 1) * h] + + target_id = max(self.idcnt, key=self.idcnt.get) + _, component, cstats, ccenter = cv2.connectedComponentsWithStats( + (self.map == target_id).astype(np.uint8), + connectivity=4 + ) + # remove cid = 0, it is the invalid area + csize = [ci[-1] for ci in cstats[1:]] + target_cid = csize.index(max(csize)) + 1 + center = ccenter[target_cid][::-1] + coord = np.stack(np.where(component == target_cid)).T + dist = np.linalg.norm(coord - center, axis=1) + target_coord_id = np.argmin(dist) + coord_h, coord_w = coord[target_coord_id] + + # replace_sampling + self.idcnt[target_id] -= max(csize) + if self.idcnt[target_id] == 0: + self.idcnt.pop(target_id) + self.map[component == target_cid] = 0 + return [coord_w, coord_h] + + +class RandomCoordInit: + def __init__(self, canvas_width, canvas_height): + self.canvas_width, self.canvas_height = canvas_width, canvas_height + + def __call__(self): + w, h = self.canvas_width, self.canvas_height + return [np.random.uniform(0, 1) * w, np.random.uniform(0, 1) * h] + + +class NaiveCoordInit: + def __init__(self, pred, gt, format='[bs x c x 2D]', replace_sampling=True): + if isinstance(pred, torch.Tensor): + pred = pred.detach().cpu().numpy() + if isinstance(gt, torch.Tensor): + gt = gt.detach().cpu().numpy() + + if format == '[bs x c x 2D]': + self.map = ((pred[0] - gt[0]) ** 2).sum(0) + elif format == ['[2D x c]']: + self.map = ((pred - gt) ** 2).sum(-1) + else: + raise ValueError + self.replace_sampling = replace_sampling + + def __call__(self): + coord = np.where(self.map == self.map.max()) + coord_h, coord_w = coord[0][0], coord[1][0] + if self.replace_sampling: + self.map[coord_h, coord_w] = -1 + return [coord_w, coord_h] + + +class PainterOptimizer: + + def __init__(self, + renderer: Painter, + style: str, + num_iter: int, + lr_config: omegaconf.DictConfig, + trainable_bg: bool = False): + self.renderer = renderer + self.num_iter = num_iter + self.trainable_bg = trainable_bg + self.lr_config = lr_config + + # set optimized params via style + self.optim_point, self.optim_color, self.optim_width = { + "iconography": (True, True, False), + "pixelart": (False, True, False), + "low-poly": (True, True, False), + "sketch": (True, True, False), + "ink": (True, True, True), + "painting": (True, True, True) + }.get(style, (False, False, False)) + self.optim_bg = trainable_bg + + # set lr schedule + schedule_cfg = lr_config.schedule + if schedule_cfg.name == 'linear': + self.lr_lambda = LinearDecayWithKeepLRLambda(init_lr=lr_config.point, + keep_ratio=schedule_cfg.keep_ratio, + decay_every=self.num_iter, + decay_ratio=schedule_cfg.decay_ratio) + elif schedule_cfg.name == 'cosine': + self.lr_lambda = CosineWithWarmupLRLambda(num_steps=self.num_iter, + warmup_steps=schedule_cfg.warmup_steps, + 
warmup_start_lr=schedule_cfg.warmup_start_lr, + warmup_end_lr=schedule_cfg.warmup_end_lr, + cosine_end_lr=schedule_cfg.cosine_end_lr) + else: + print(f"{schedule_cfg.name} is not support.") + self.lr_lambda = None + + self.point_optimizer = None + self.color_optimizer = None + self.width_optimizer = None + self.bg_optimizer = None + self.point_scheduler = None + + def init_optimizers(self, pid_delta: int = 0): + # optimizer + optim_cfg = self.lr_config.optim + optim_name = optim_cfg.name + + params = {} + if self.optim_point: + self.renderer.set_point_parameters(pid_delta) + params['point'] = self.renderer.get_point_parameters() + self.point_optimizer = get_optimizer(optim_name, params['point'], self.lr_config.point, optim_cfg) + + if self.optim_color: + self.renderer.set_color_parameters() + params['color'] = self.renderer.get_color_parameters() + self.color_optimizer = get_optimizer(optim_name, params['color'], self.lr_config.color, optim_cfg) + + if self.optim_width: + self.renderer.set_width_parameters() + params['width'] = self.renderer.get_width_parameters() + if len(params['width']) > 0: + self.width_optimizer = get_optimizer(optim_name, params['width'], self.lr_config.width, optim_cfg) + + if self.optim_bg: + self.renderer.para_bg.requires_grad = True + self.bg_optimizer = get_optimizer(optim_name, self.renderer.para_bg, self.lr_config.bg, optim_cfg) + + # lr schedule + if self.lr_lambda is not None and self.optim_point: + self.point_scheduler = LambdaLR(self.point_optimizer, lr_lambda=self.lr_lambda, last_epoch=-1) + + def update_lr(self): + if self.point_scheduler is not None: + self.point_scheduler.step() + + def zero_grad_(self): + if self.point_optimizer is not None: + self.point_optimizer.zero_grad() + if self.color_optimizer is not None: + self.color_optimizer.zero_grad() + if self.width_optimizer is not None: + self.width_optimizer.zero_grad() + if self.bg_optimizer is not None: + self.bg_optimizer.zero_grad() + + def step_(self): + if self.point_optimizer is not None: + self.point_optimizer.step() + if self.color_optimizer is not None: + self.color_optimizer.step() + if self.width_optimizer is not None: + self.width_optimizer.step() + if self.bg_optimizer is not None: + self.bg_optimizer.step() + + def get_lr(self) -> Dict: + lr = {} + if self.point_optimizer is not None: + lr['pnt'] = self.point_optimizer.param_groups[0]['lr'] + if self.color_optimizer is not None: + lr['clr'] = self.color_optimizer.param_groups[0]['lr'] + if self.width_optimizer is not None: + lr['wd'] = self.width_optimizer.param_groups[0]['lr'] + if self.bg_optimizer is not None: + lr['bg'] = self.bg_optimizer.param_groups[0]['lr'] + return lr + + +class LinearDecayWithKeepLRLambda: + """apply in LIVE stage""" + + def __init__(self, init_lr, keep_ratio, decay_every, decay_ratio): + self.init_lr = init_lr + self.keep_ratio = keep_ratio + self.decay_every = decay_every + self.decay_ratio = decay_ratio + + def __call__(self, n): + if n < self.keep_ratio * self.decay_every: + return self.init_lr + + decay_time = n // self.decay_every + decay_step = n % self.decay_every + lr_s = self.decay_ratio ** decay_time + lr_e = self.decay_ratio ** (decay_time + 1) + r = decay_step / self.decay_every + lr = lr_s * (1 - r) + lr_e * r + return lr + + +class CosineWithWarmupLRLambda: + """apply in fine-tuning stage""" + + def __init__(self, num_steps, warmup_steps, warmup_start_lr, warmup_end_lr, cosine_end_lr): + self.n_steps = num_steps + self.n_warmup = warmup_steps + self.warmup_start_lr = warmup_start_lr + 
self.warmup_end_lr = warmup_end_lr + self.cosine_end_lr = cosine_end_lr + + def __call__(self, n): + if n < self.n_warmup: + # linearly warmup + return self.warmup_start_lr + (n / self.n_warmup) * (self.warmup_end_lr - self.warmup_start_lr) + else: + # cosine decayed schedule + return self.cosine_end_lr + 0.5 * (self.warmup_end_lr - self.cosine_end_lr) * ( + 1 + math.cos(math.pi * (n - self.n_warmup) / (self.n_steps - self.n_warmup))) diff --git a/pytorch_svgrender/painter/vectorglyph/__init__.py b/pytorch_svgrender/painter/vectorglyph/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa84ada11e41ab2d46cded89b15eccbae0ba26be --- /dev/null +++ b/pytorch_svgrender/painter/vectorglyph/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: + +from .painter_params import Painter, PainterOptimizer diff --git a/pytorch_svgrender/painter/vectorglyph/bezier.py b/pytorch_svgrender/painter/vectorglyph/bezier.py new file mode 100644 index 0000000000000000000000000000000000000000..88d4a4ded50230cb37eb332a735db39685564c01 --- /dev/null +++ b/pytorch_svgrender/painter/vectorglyph/bezier.py @@ -0,0 +1,127 @@ +import numpy as np +import matplotlib.pyplot as plt +from scipy.special import binom +from numpy.linalg import norm + + +def num_bezier(n_ctrl, degree=3): + if type(n_ctrl) == np.ndarray: + n_ctrl = len(n_ctrl) + return int((n_ctrl - 1) / degree) + + +def bernstein(n, i): + bi = binom(n, i) + return lambda t, bi=bi, n=n, i=i: bi * t ** i * (1 - t) ** (n - i) + + +def bezier(P, t, d=0): + """Bezier curve of degree len(P)-1. d is the derivative order (0 gives positions)""" + n = P.shape[0] - 1 + if d > 0: + Q = np.diff(P, axis=0) * n + return bezier(Q, t, d - 1) + B = np.vstack([bernstein(n, i)(t) for i, p in enumerate(P)]) + return (P.T @ B).T + + +def cubic_bezier(P, t): + return (1.0 - t) ** 3 * P[0] + 3 * (1.0 - t) ** 2 * t * P[1] + 3 * (1.0 - t) * t ** 2 * P[2] + t ** 3 * P[3] + + +def bezier_piecewise(Cp, subd=100, degree=3, d=0): + """sample a piecewise Bezier curve given a sequence of control points""" + num = num_bezier(Cp.shape[0], degree) + X = [] + for i in range(num): + P = Cp[i * degree:i * degree + degree + 1, :] + t = np.linspace(0, 1., subd)[:-1] + Y = bezier(P, t, d) + X += [Y] + X.append(Cp[-1]) + X = np.vstack(X) + return X + + +def compute_beziers(beziers, subd=100, degree=3): + chain = beziers_to_chain(beziers) + return bezier_piecewise(chain, subd, degree) + + +def plot_control_polygon(Cp, degree=3, lw=0.5, linecolor=np.ones(3) * 0.1): + n_bezier = num_bezier(len(Cp), degree) + for i in range(n_bezier): + cp = Cp[i * degree:i * degree + degree + 1, :] + if degree == 3: + plt.plot(cp[0:2, 0], cp[0:2, 1], ':', color=linecolor, linewidth=lw) + plt.plot(cp[2:, 0], cp[2:, 1], ':', color=linecolor, linewidth=lw) + plt.plot(cp[:, 0], cp[:, 1], 'o', color=[0, 0.5, 1.], markersize=4) + else: + plt.plot(cp[:, 0], cp[:, 1], ':', color=linecolor, linewidth=lw) + plt.plot(cp[:, 0], cp[:, 1], 'o', color=[0, 0.5, 1.]) + + +def chain_to_beziers(chain, degree=3): + """Convert Bezier chain to list of curve segments (4 control points each)""" + num = num_bezier(chain.shape[0], degree) + beziers = [] + for i in range(num): + beziers.append(chain[i * degree:i * degree + degree + 1, :]) + return beziers + + +def beziers_to_chain(beziers): + """Convert list of Bezier curve segments to a piecewise bezier chain (shares vertices)""" + n = len(beziers) + chain = [] + for i in range(n): + 
chain.append(list(beziers[i][:-1])) + chain.append([beziers[-1][-1]]) + return np.array(sum(chain, [])) + + +def split_cubic(bez, t): + p1, p2, p3, p4 = bez + + p12 = (p2 - p1) * t + p1 + p23 = (p3 - p2) * t + p2 + p34 = (p4 - p3) * t + p3 + + p123 = (p23 - p12) * t + p12 + p234 = (p34 - p23) * t + p23 + + p1234 = (p234 - p123) * t + p123 + + return np.array([p1, p12, p123, p1234]), np.array([p1234, p234, p34, p4]) + + +def approx_arc_length(bez): + c0, c1, c2, c3 = bez + v0 = norm(c1 - c0) * 0.15 + v1 = norm(-0.558983582205757 * c0 + 0.325650248872424 * c1 + 0.208983582205757 * c2 + 0.024349751127576 * c3) + v2 = norm(c3 - c0 + c2 - c1) * 0.26666666666666666 + v3 = norm(-0.024349751127576 * c0 - 0.208983582205757 * c1 - 0.325650248872424 * c2 + 0.558983582205757 * c3) + v4 = norm(c3 - c2) * .15 + return v0 + v1 + v2 + v3 + v4 + + +def subdivide_bezier(bez, thresh): + stack = [bez] + res = [] + while stack: + bez = stack.pop() + l = approx_arc_length(bez) + if l < thresh: + res.append(bez) + else: + b1, b2 = split_cubic(bez, 0.5) + stack += [b2, b1] + return res + + +def subdivide_bezier_chain(C, thresh): + beziers = chain_to_beziers(C) + res = [] + for bez in beziers: + res += subdivide_bezier(bez, thresh) + return beziers_to_chain(res) diff --git a/pytorch_svgrender/painter/vectorglyph/losses.py b/pytorch_svgrender/painter/vectorglyph/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..2f5a21ef7693a237efdbc7496414cdea7eceace8 --- /dev/null +++ b/pytorch_svgrender/painter/vectorglyph/losses.py @@ -0,0 +1,104 @@ +from typing import Dict + +import torch +import torch.nn as nn +from torch.nn import functional as nnf +import torchvision +import numpy as np +from scipy.spatial import Delaunay +from shapely.geometry import Point +from shapely.geometry.polygon import Polygon + + +class ToneLoss(nn.Module): + def __init__(self, cfg): + super(ToneLoss, self).__init__() + self.dist_loss_weight = cfg.dist_loss_weight + self.im_init = None + self.mse_loss = nn.MSELoss() + self.blur = torchvision.transforms.GaussianBlur( + kernel_size=(cfg.pixel_dist_kernel_blur, + cfg.pixel_dist_kernel_blur), + sigma=(cfg.pixel_dist_sigma, cfg.pixel_dist_sigma) + ) + self.init_blurred = None + + def set_image_init(self, im_init): + self.im_init = im_init + self.init_blurred = self.blur(self.im_init) + + def get_scheduler(self, step=None): + if step is not None: + return self.dist_loss_weight * np.exp(-(1 / 5) * ((step - 300) / (20)) ** 2) + else: + return self.dist_loss_weight + + def forward(self, cur_raster, step=None): + blurred_cur = self.blur(cur_raster) + return self.mse_loss(self.init_blurred.detach(), blurred_cur) * self.get_scheduler(step) + + +class ConformalLoss: + def __init__(self, parameters, shape_groups, target_letter: str, device: torch.device): + self.parameters = parameters + self.device = device + self.target_letter = target_letter + self.shape_groups = shape_groups + self.faces = self.init_faces(device) + self.faces_roll_a = [torch.roll(self.faces[i], 1, 1) for i in range(len(self.faces))] + + with torch.no_grad(): + self.angles = [] + self.reset(device) + + def get_angles(self, points: torch.Tensor) -> torch.Tensor: + angles_ = [] + for i in range(len(self.faces)): + triangles = points[self.faces[i]] + triangles_roll_a = points[self.faces_roll_a[i]] + edges = triangles_roll_a - triangles + length = edges.norm(dim=-1) + edges = edges / (length + 1e-1)[:, :, None] + edges_roll = torch.roll(edges, 1, 1) + cosine = torch.einsum('ned,ned->ne', edges, edges_roll) + angles = 
torch.arccos(cosine) + angles_.append(angles) + return angles_ + + def get_letter_inds(self, letter_to_insert): + for group, l in zip(self.shape_groups, self.target_letter): + if l == letter_to_insert: + letter_inds = group.shape_ids + return letter_inds[0], letter_inds[-1], len(letter_inds) + + def reset(self, device): + points = torch.cat([point.to(device) for point in self.parameters]) + self.angles = self.get_angles(points) + + def init_faces(self, device: torch.device) -> torch.tensor: + faces_ = [] + for j, c in enumerate(self.target_letter): + points_np = [ + self.parameters[i].clone().detach().cpu().numpy() + for i in range(len(self.parameters)) + ] + start_ind, end_ind, shapes_per_letter = self.get_letter_inds(c) + print(c, "start_ind: ", start_ind.item(), ", end_ind: ", end_ind.item()) + holes = [] + if shapes_per_letter > 1: + holes = points_np[start_ind + 1:end_ind] + poly = Polygon(points_np[start_ind], holes=holes) + poly = poly.buffer(0) + points_np = np.concatenate(points_np) + faces = Delaunay(points_np).simplices + is_intersect = np.array([poly.contains(Point(points_np[face].mean(0))) for face in faces], dtype=bool) + faces_.append(torch.from_numpy(faces[is_intersect]).to(device, dtype=torch.int64)) + return faces_ + + def __call__(self) -> torch.Tensor: + loss_angles = 0 + points = torch.cat(self.parameters).to(self.device) + angles = self.get_angles(points) + for i in range(len(self.faces)): + loss_angles += (nnf.mse_loss(angles[i], self.angles[i])) + return loss_angles diff --git a/pytorch_svgrender/painter/vectorglyph/painter_params.py b/pytorch_svgrender/painter/vectorglyph/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..835d1a27d3e98110a0e5c810001ae9ba098c4b19 --- /dev/null +++ b/pytorch_svgrender/painter/vectorglyph/painter_params.py @@ -0,0 +1,404 @@ +import os +import pathlib + +import numpy as np +import omegaconf +import pydiffvg +import torch +from torch.optim.lr_scheduler import LambdaLR + +from methods.diffvg_warp import DiffVGState +from .ttf import font_string_to_beziers, write_letter_svg + + +class Painter(DiffVGState): + + def __init__(self, args, imsize, device): + super(Painter, self).__init__(device=device, use_gpu=True, canvas_width=imsize, canvas_height=imsize) + self.args = args + self.optim_color = self.args.optim_color + + def init_shape(self, path_svg, seed=0): + assert pathlib.Path(path_svg).exists(), f"{path_svg} is not exist!" + print(f"-> init svg from `{path_svg}` ...") + # 1. load svg from path + canvas_width, canvas_height, self.shapes, self.shape_groups = self.load_svg(path_svg) + + # init color + if self.optim_color: + fill_color_init = torch.FloatTensor(np.random.uniform(size=[4])) + fill_color_init[-1] = 1.0 + + for group in self.shape_groups: + group.fill_color = fill_color_init.to(self.device) + + # 2. 
set learnable parameters + self.set_point_parameters() + if self.optim_color: + self.set_color_parameters() + + img = self.render_warp(seed) + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_image(self, step: int = 0): + img = self.render_warp(step) + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def clip_curve_shape(self): + if self.optim_color: + for group in self.shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + group.fill_color.data[-1] = 1.0 + + def render_warp(self, seed=0): + scene_args = pydiffvg.RenderFunction.serialize_scene( + self.canvas_width, self.canvas_height, self.shapes, self.shape_groups + ) + _render = pydiffvg.RenderFunction.apply + img = _render(self.canvas_width, # width + self.canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + seed, # seed + None, + *scene_args) + return img + + def set_point_parameters(self): # shape location optimization + self.point_vars = [] + for i, path in enumerate(self.shapes): + path.points.requires_grad = True + self.point_vars.append(path.points) + + def get_point_parameters(self): + return self.point_vars + + def set_color_parameters(self): + self.color_vars = [] + for i, group in enumerate(self.shape_groups): + if group.fill_color is not None: + group.fill_color.requires_grad = True + self.color_vars.append(group.fill_color) + if group.stroke_color is not None: + group.stroke_color.requires_grad = True + self.color_vars.append(group.stroke_color) + + def get_color_parameters(self): + return self.color_vars + + def get_width_parameters(self): + return self.width_vars + + def preprocess_font(self, word, letter, level_of_cc=1, font_path=None, init_path=None): + if level_of_cc == 0: + target_cp = None + else: + target_cp = {"A": 120, "B": 120, "C": 100, "D": 100, + "E": 120, "F": 120, "G": 120, "H": 120, + "I": 35, "J": 80, "K": 100, "L": 80, + "M": 100, "N": 100, "O": 100, "P": 120, + "Q": 120, "R": 130, "S": 110, "T": 90, + "U": 100, "V": 100, "W": 100, "X": 130, + "Y": 120, "Z": 120, + "a": 120, "b": 120, "c": 100, "d": 100, + "e": 120, "f": 120, "g": 120, "h": 120, + "i": 35, "j": 80, "k": 100, "l": 80, + "m": 100, "n": 100, "o": 100, "p": 120, + "q": 120, "r": 130, "s": 110, "t": 90, + "u": 100, "v": 100, "w": 100, "x": 130, + "y": 120, "z": 120} + target_cp = {k: v * level_of_cc for k, v in target_cp.items()} + + print("init_path: ", init_path) + + subdivision_thresh = None + self.font_string_to_svgs(init_path, + font_path, + word, + target_control=target_cp, + subdivision_thresh=subdivision_thresh) + self.normalize_letter_size(init_path, font_path, word) + + # optimize two adjacent letters + print("letter: ", letter) + if len(letter) > 1: + subdivision_thresh = None + self.font_string_to_svgs(init_path, + font_path, + letter, + target_control=target_cp, + subdivision_thresh=subdivision_thresh) + self.normalize_letter_size(init_path, font_path, letter) + + print("preprocess_font done.") + + def font_string_to_svgs(self, dest_path, font, txt, size=30, spacing=1.0, target_control=None, + subdivision_thresh=None): + fontname = 
self.args.font + glyph_beziers = font_string_to_beziers(font, txt, size, spacing, merge=False, target_control=target_control) + + # compute bounding box + points = np.vstack(sum(glyph_beziers, [])) + lt = np.min(points, axis=0) + rb = np.max(points, axis=0) + size = rb - lt + + sizestr = 'width="%.1f" height="%.1f"' % (size[0], size[1]) + boxstr = ' viewBox="%.1f %.1f %.1f %.1f"' % (lt[0], lt[1], size[0], size[1]) + header = '''<?xml version="1.0" encoding="utf-8"?> + <svg xmlns="http://www.w3.org/2000/svg" xmlns:ev="http://www.w3.org/2001/xml-events" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" baseProfile="full" ''' + header += sizestr + header += boxstr + header += '>\n<defs/>\n' + + svg_all = header + + for i, (c, beziers) in enumerate(zip(txt, glyph_beziers)): + fname, path = write_letter_svg(c, header, fontname, beziers, subdivision_thresh, dest_path) + + num_cp = self.count_cp(fname) + print(f"Total control point: {num_cp} -- {c}") + # Add to global svg + svg_all += path + '</g>\n' + + # Save global svg + svg_all += '</svg>\n' + fname = f"{dest_path}/{fontname}_{txt}.svg" + fname = fname.replace(" ", "_") + with open(fname, 'w') as f: + f.write(svg_all) + + def count_cp(self, file_name): + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(file_name) + p_counter = 0 + for path in shapes: + p_counter += path.points.shape[0] + return p_counter + + def normalize_letter_size(self, dest_path, font, txt): + fontname = os.path.splitext(os.path.basename(font))[0] + for i, c in enumerate(txt): + fname = f"{dest_path}/{fontname}_{c}.svg" + fname = fname.replace(" ", "_") + self.fix_single_svg(fname) + + fname = f"{dest_path}/{fontname}_{txt}.svg" + fname = fname.replace(" ", "_") + self.fix_single_svg(fname, all_word=True) + + def fix_single_svg(self, svg_path, all_word=False): + target_h_letter = 360 + target_canvas_width, target_canvas_height = 600, 600 + + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(svg_path) + + letter_h = canvas_height + letter_w = canvas_width + + if all_word: + if letter_w > letter_h: + scale_canvas_w = target_h_letter / letter_w + hsize = int(letter_h * scale_canvas_w) + scale_canvas_h = hsize / letter_h + else: + scale_canvas_h = target_h_letter / letter_h + wsize = int(letter_w * scale_canvas_h) + scale_canvas_w = wsize / letter_w + else: + scale_canvas_h = target_h_letter / letter_h + wsize = int(letter_w * scale_canvas_h) + scale_canvas_w = wsize / letter_w + + for num, p in enumerate(shapes): + p.points[:, 0] = p.points[:, 0] * scale_canvas_w + p.points[:, 1] = p.points[:, 1] * scale_canvas_h + target_h_letter + + w_min = min([torch.min(p.points[:, 0]) for p in shapes]) + w_max = max([torch.max(p.points[:, 0]) for p in shapes]) + h_min = min([torch.min(p.points[:, 1]) for p in shapes]) + h_max = max([torch.max(p.points[:, 1]) for p in shapes]) + + for num, p in enumerate(shapes): + p.points[:, 0] = p.points[:, 0] + (target_canvas_width / 2) - int(w_min + (w_max - w_min) / 2) + p.points[:, 1] = p.points[:, 1] + (target_canvas_height / 2) - int(h_min + (h_max - h_min) / 2) + + output_path = f"{svg_path[:-4]}_scaled.svg" + print("output_path: ", output_path) + self.save_svg(output_path, target_canvas_width, target_canvas_height, shapes, shape_groups) + + def combine_word(self, word, letter, font, results_dir): + word_svg_scaled = results_dir / f"{font}_{word}_scaled.svg" + canvas_width_word, canvas_height_word, shapes_word, shape_groups_word = pydiffvg.svg_to_scene(word_svg_scaled) + letter_ids = [] + 
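# gather the shape ids of the optimized letter(s) so they can be swapped back into the full word
+        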
for l in letter: + letter_ids += self.get_letter_ids(l, word, shape_groups_word) + + w_min, w_max = min([torch.min(shapes_word[ids].points[:, 0]) for ids in letter_ids]), max( + [torch.max(shapes_word[ids].points[:, 0]) for ids in letter_ids]) + h_min, h_max = min([torch.min(shapes_word[ids].points[:, 1]) for ids in letter_ids]), max( + [torch.max(shapes_word[ids].points[:, 1]) for ids in letter_ids]) + + c_w = (-w_min + w_max) / 2 + c_h = (-h_min + h_max) / 2 + + svg_result = results_dir / "final_letter.svg" + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(svg_result) + + out_w_min, out_w_max = min([torch.min(p.points[:, 0]) for p in shapes]), max( + [torch.max(p.points[:, 0]) for p in shapes]) + out_h_min, out_h_max = min([torch.min(p.points[:, 1]) for p in shapes]), max( + [torch.max(p.points[:, 1]) for p in shapes]) + + out_c_w = (-out_w_min + out_w_max) / 2 + out_c_h = (-out_h_min + out_h_max) / 2 + + scale_canvas_w = (w_max - w_min) / (out_w_max - out_w_min) + scale_canvas_h = (h_max - h_min) / (out_h_max - out_h_min) + + if scale_canvas_h > scale_canvas_w: + wsize = int((out_w_max - out_w_min) * scale_canvas_h) + scale_canvas_w = wsize / (out_w_max - out_w_min) + shift_w = -out_c_w * scale_canvas_w + c_w + else: + hsize = int((out_h_max - out_h_min) * scale_canvas_w) + scale_canvas_h = hsize / (out_h_max - out_h_min) + shift_h = -out_c_h * scale_canvas_h + c_h + + for num, p in enumerate(shapes): + p.points[:, 0] = p.points[:, 0] * scale_canvas_w + p.points[:, 1] = p.points[:, 1] * scale_canvas_h + if scale_canvas_h > scale_canvas_w: + p.points[:, 0] = p.points[:, 0] - out_w_min * scale_canvas_w + w_min + shift_w + p.points[:, 1] = p.points[:, 1] - out_h_min * scale_canvas_h + h_min + else: + p.points[:, 0] = p.points[:, 0] - out_w_min * scale_canvas_w + w_min + p.points[:, 1] = p.points[:, 1] - out_h_min * scale_canvas_h + h_min + shift_h + + for j, s in enumerate(letter_ids): + shapes_word[s] = shapes[j] + + word_letter_result = results_dir / f"{font}_{word}_{letter}.svg" + self.save_svg(word_letter_result, canvas_width, canvas_height, shapes_word, shape_groups_word) + + render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(canvas_width, + canvas_height, + shapes_word, + shape_groups_word) + img = render(canvas_width, canvas_height, 2, 2, 0, None, *scene_args) + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + + word_letter_result = results_dir / f"{font}_{word}_{letter}.png" + self.save_image(img, word_letter_result) + + def get_letter_ids(self, letter, word, shape_groups): + for group, l in zip(shape_groups, word): + if l == letter: + return group.shape_ids + + def pretty_save_svg(self, filename, width=None, height=None, shapes=None, shape_groups=None): + width = self.canvas_width if width is None else width + height = self.canvas_height if height is None else height + shapes = self.shapes if shapes is None else shapes + shape_groups = self.shape_groups if shape_groups is None else shape_groups + + self.save_svg(filename, width, height, shapes, shape_groups, use_gamma=False, background=None) + + +class PainterOptimizer: + + def __init__(self, renderer: Painter, num_iter: int, lr_cfg: omegaconf.DictConfig, optim_color: bool = False): + self.renderer = renderer + self.num_iter = num_iter + self.lr_cfg = lr_cfg + self.optim_color = optim_color + + self.point_optimizer = None + self.color_optimizer = None + 
self.scheduler = None + + def init_optimizers(self): + # optimizer + point_vars = self.renderer.get_point_parameters() + self.point_optimizer = torch.optim.Adam(point_vars, lr=self.lr_cfg.point, betas=(0.9, 0.9), eps=1e-6) + + if self.optim_color: + color_vars = self.renderer.get_color_parameters() + self.color_optimizer = torch.optim.Adam(color_vars, lr=self.lr_cfg.color, betas=(0.9, 0.9), eps=1e-6) + + # lr schedule + lr_lambda_fn = lambda step: learning_rate_decay( + step, + self.lr_cfg.lr_init, + self.lr_cfg.lr_final, + self.num_iter, + self.lr_cfg.lr_delay_steps, + self.lr_cfg.lr_delay_mult + ) / self.lr_cfg.lr_init + self.scheduler = LambdaLR(self.point_optimizer, lr_lambda=lr_lambda_fn, last_epoch=-1) + + def update_lr(self): + self.scheduler.step() + + def zero_grad_(self): + self.point_optimizer.zero_grad() + if self.optim_color: + self.color_optimizer.zero_grad() + + def step_(self): + self.point_optimizer.step() + if self.optim_color: + self.color_optimizer.step() + + def get_lr(self): + return self.point_optimizer.param_groups[0]['lr'] + + +def learning_rate_decay(step, + lr_init, + lr_final, + max_steps, + lr_delay_steps=0, + lr_delay_mult=1): + """ + Continuous learning rate decay function. + The returned rate is lr_init when step=0 and lr_final when step=max_steps, and + is log-linearly interpolated elsewhere (equivalent to exponential decay). + If lr_delay_steps>0 then the learning rate will be scaled by some smooth + function of lr_delay_mult, such that the initial learning rate is + lr_init*lr_delay_mult at the beginning of optimization but will be eased back + to the normal learning rate when steps>lr_delay_steps. + + pytorch adaptation of https://github.com/google/mipnerf + + Args: + step: int, the current optimization step. + lr_init: float, the initial learning rate. + lr_final: float, the final learning rate. + max_steps: int, the number of steps during optimization. + lr_delay_steps: int, the number of steps to delay the full learning rate. + lr_delay_mult: float, the multiplier on the rate when delaying it. + Returns: + lr: the learning for current step 'step'. + """ + if lr_delay_steps > 0: + # A kind of reverse cosine decay. + delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin( + 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)) + else: + delay_rate = 1. + t = np.clip(step / max_steps, 0, 1) + log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) + return delay_rate * log_lerp diff --git a/pytorch_svgrender/painter/vectorglyph/ttf.py b/pytorch_svgrender/painter/vectorglyph/ttf.py new file mode 100644 index 0000000000000000000000000000000000000000..c769db9ff034bd19cf2a9b6b86e97bbbad5cc4bc --- /dev/null +++ b/pytorch_svgrender/painter/vectorglyph/ttf.py @@ -0,0 +1,114 @@ +import numpy as np +import freetype as ft + +from . 
import bezier + + +def glyph_to_cubics(face, x=0): + """Convert current font face glyph to cubic beziers""" + + def linear_to_cubic(Q): + a, b = Q + return [a + (b - a) * t for t in np.linspace(0, 1, 4)] + + def quadratic_to_cubic(Q): + return [Q[0], + Q[0] + (2 / 3) * (Q[1] - Q[0]), + Q[2] + (2 / 3) * (Q[1] - Q[2]), + Q[2]] + + beziers = [] + pt = lambda p: np.array([p.x + x, -p.y]) # Flipping here since freetype has y-up + last = lambda: beziers[-1][-1] + + def move_to(a, beziers): + beziers.append([pt(a)]) + + def line_to(a, beziers): + Q = linear_to_cubic([last(), pt(a)]) + beziers[-1] += Q[1:] + + def conic_to(a, b, beziers): + Q = quadratic_to_cubic([last(), pt(a), pt(b)]) + beziers[-1] += Q[1:] + + def cubic_to(a, b, c, beziers): + beziers[-1] += [pt(a), pt(b), pt(c)] + + face.glyph.outline.decompose(beziers, move_to=move_to, line_to=line_to, conic_to=conic_to, cubic_to=cubic_to) + beziers = [np.array(C).astype(float) for C in beziers] + return beziers + + +def font_string_to_beziers(font, txt, size=30, spacing=1.0, merge=True, target_control=None): + """ + Load a font and convert the outlines for a given string to cubic bezier curves, + if merge is True, simply return a list of all bezier curves, + otherwise return a list of lists with the bezier curves for each glyph + """ + + face = ft.Face(font) + face.set_char_size(64 * size) + slot = face.glyph + + x = 0 + beziers = [] + previous = 0 + for c in txt: + face.load_char(c, ft.FT_LOAD_DEFAULT | ft.FT_LOAD_NO_BITMAP) + bez = glyph_to_cubics(face, x) + + # Check number of control points if desired + if target_control is not None: + if c in target_control.keys(): + nctrl = np.sum([len(C) for C in bez]) + while nctrl < target_control[c]: + longest = np.max( + sum([[bezier.approx_arc_length(b) for b in bezier.chain_to_beziers(C)] for C in bez], [])) + thresh = longest * 0.5 + bez = [bezier.subdivide_bezier_chain(C, thresh) for C in bez] + nctrl = np.sum([len(C) for C in bez]) + print("nctrl: ", nctrl) + + if merge: + beziers += bez + else: + beziers.append(bez) + + kerning = face.get_kerning(previous, c) + x += (slot.advance.x + kerning.x) * spacing + previous = c + + return beziers + + +def bezier_chain_to_commands(C, closed=True): + curves = bezier.chain_to_beziers(C) + cmds = 'M %f %f ' % (C[0][0], C[0][1]) + n = len(curves) + for i, bez in enumerate(curves): + if i == n - 1 and closed: + cmds += 'C %f %f %f %f %f %fz ' % (*bez[1], *bez[2], *bez[3]) + else: + cmds += 'C %f %f %f %f %f %f ' % (*bez[1], *bez[2], *bez[3]) + return cmds + + +def write_letter_svg(c, header, fontname, beziers, subdivision_thresh, dest_path): + cmds = '' + svg = header + + path = '<g><path d="' + for C in beziers: + if subdivision_thresh is not None: + print('subd') + C = bezier.subdivide_bezier_chain(C, subdivision_thresh) + cmds += bezier_chain_to_commands(C, True) + path += cmds + '"/>\n' + svg += path + '</g></svg>\n' + + fname = f"{dest_path}/{fontname}_{c}.svg" + fname = fname.replace(" ", "_") + with open(fname, 'w') as f: + f.write(svg) + return fname, path diff --git a/pytorch_svgrender/painter/wordasimage/__init__.py b/pytorch_svgrender/painter/wordasimage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa84ada11e41ab2d46cded89b15eccbae0ba26be --- /dev/null +++ b/pytorch_svgrender/painter/wordasimage/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: + +from .painter_params import Painter, PainterOptimizer diff --git a/pytorch_svgrender/painter/wordasimage/bezier.py b/pytorch_svgrender/painter/wordasimage/bezier.py new file mode 100644 index 0000000000000000000000000000000000000000..88d4a4ded50230cb37eb332a735db39685564c01 --- /dev/null +++ b/pytorch_svgrender/painter/wordasimage/bezier.py @@ -0,0 +1,127 @@ +import numpy as np +import matplotlib.pyplot as plt +from scipy.special import binom +from numpy.linalg import norm + + +def num_bezier(n_ctrl, degree=3): + if type(n_ctrl) == np.ndarray: + n_ctrl = len(n_ctrl) + return int((n_ctrl - 1) / degree) + + +def bernstein(n, i): + bi = binom(n, i) + return lambda t, bi=bi, n=n, i=i: bi * t ** i * (1 - t) ** (n - i) + + +def bezier(P, t, d=0): + """Bezier curve of degree len(P)-1. d is the derivative order (0 gives positions)""" + n = P.shape[0] - 1 + if d > 0: + Q = np.diff(P, axis=0) * n + return bezier(Q, t, d - 1) + B = np.vstack([bernstein(n, i)(t) for i, p in enumerate(P)]) + return (P.T @ B).T + + +def cubic_bezier(P, t): + return (1.0 - t) ** 3 * P[0] + 3 * (1.0 - t) ** 2 * t * P[1] + 3 * (1.0 - t) * t ** 2 * P[2] + t ** 3 * P[3] + + +def bezier_piecewise(Cp, subd=100, degree=3, d=0): + """sample a piecewise Bezier curve given a sequence of control points""" + num = num_bezier(Cp.shape[0], degree) + X = [] + for i in range(num): + P = Cp[i * degree:i * degree + degree + 1, :] + t = np.linspace(0, 1., subd)[:-1] + Y = bezier(P, t, d) + X += [Y] + X.append(Cp[-1]) + X = np.vstack(X) + return X + + +def compute_beziers(beziers, subd=100, degree=3): + chain = beziers_to_chain(beziers) + return bezier_piecewise(chain, subd, degree) + + +def plot_control_polygon(Cp, degree=3, lw=0.5, linecolor=np.ones(3) * 0.1): + n_bezier = num_bezier(len(Cp), degree) + for i in range(n_bezier): + cp = Cp[i * degree:i * degree + degree + 1, :] + if degree == 3: + plt.plot(cp[0:2, 0], cp[0:2, 1], ':', color=linecolor, linewidth=lw) + plt.plot(cp[2:, 0], cp[2:, 1], ':', color=linecolor, linewidth=lw) + plt.plot(cp[:, 0], cp[:, 1], 'o', color=[0, 0.5, 1.], markersize=4) + else: + plt.plot(cp[:, 0], cp[:, 1], ':', color=linecolor, linewidth=lw) + plt.plot(cp[:, 0], cp[:, 1], 'o', color=[0, 0.5, 1.]) + + +def chain_to_beziers(chain, degree=3): + """Convert Bezier chain to list of curve segments (4 control points each)""" + num = num_bezier(chain.shape[0], degree) + beziers = [] + for i in range(num): + beziers.append(chain[i * degree:i * degree + degree + 1, :]) + return beziers + + +def beziers_to_chain(beziers): + """Convert list of Bezier curve segments to a piecewise bezier chain (shares vertices)""" + n = len(beziers) + chain = [] + for i in range(n): + chain.append(list(beziers[i][:-1])) + chain.append([beziers[-1][-1]]) + return np.array(sum(chain, [])) + + +def split_cubic(bez, t): + p1, p2, p3, p4 = bez + + p12 = (p2 - p1) * t + p1 + p23 = (p3 - p2) * t + p2 + p34 = (p4 - p3) * t + p3 + + p123 = (p23 - p12) * t + p12 + p234 = (p34 - p23) * t + p23 + + p1234 = (p234 - p123) * t + p123 + + return np.array([p1, p12, p123, p1234]), np.array([p1234, p234, p34, p4]) + + +def approx_arc_length(bez): + c0, c1, c2, c3 = bez + v0 = norm(c1 - c0) * 0.15 + v1 = norm(-0.558983582205757 * c0 + 0.325650248872424 * c1 + 0.208983582205757 * c2 + 0.024349751127576 * c3) + v2 = norm(c3 - c0 + c2 - c1) * 0.26666666666666666 + v3 = norm(-0.024349751127576 * c0 - 0.208983582205757 * c1 - 0.325650248872424 * c2 + 0.558983582205757 * c3) + v4 = norm(c3 - c2) * .15 + return v0 + v1 + 
v2 + v3 + v4 + + +def subdivide_bezier(bez, thresh): + stack = [bez] + res = [] + while stack: + bez = stack.pop() + l = approx_arc_length(bez) + if l < thresh: + res.append(bez) + else: + b1, b2 = split_cubic(bez, 0.5) + stack += [b2, b1] + return res + + +def subdivide_bezier_chain(C, thresh): + beziers = chain_to_beziers(C) + res = [] + for bez in beziers: + res += subdivide_bezier(bez, thresh) + return beziers_to_chain(res) diff --git a/pytorch_svgrender/painter/wordasimage/losses.py b/pytorch_svgrender/painter/wordasimage/losses.py new file mode 100644 index 0000000000000000000000000000000000000000..2f5a21ef7693a237efdbc7496414cdea7eceace8 --- /dev/null +++ b/pytorch_svgrender/painter/wordasimage/losses.py @@ -0,0 +1,104 @@ +from typing import Dict + +import torch +import torch.nn as nn +from torch.nn import functional as nnf +import torchvision +import numpy as np +from scipy.spatial import Delaunay +from shapely.geometry import Point +from shapely.geometry.polygon import Polygon + + +class ToneLoss(nn.Module): + def __init__(self, cfg): + super(ToneLoss, self).__init__() + self.dist_loss_weight = cfg.dist_loss_weight + self.im_init = None + self.mse_loss = nn.MSELoss() + self.blur = torchvision.transforms.GaussianBlur( + kernel_size=(cfg.pixel_dist_kernel_blur, + cfg.pixel_dist_kernel_blur), + sigma=(cfg.pixel_dist_sigma, cfg.pixel_dist_sigma) + ) + self.init_blurred = None + + def set_image_init(self, im_init): + self.im_init = im_init + self.init_blurred = self.blur(self.im_init) + + def get_scheduler(self, step=None): + if step is not None: + return self.dist_loss_weight * np.exp(-(1 / 5) * ((step - 300) / (20)) ** 2) + else: + return self.dist_loss_weight + + def forward(self, cur_raster, step=None): + blurred_cur = self.blur(cur_raster) + return self.mse_loss(self.init_blurred.detach(), blurred_cur) * self.get_scheduler(step) + + +class ConformalLoss: + def __init__(self, parameters, shape_groups, target_letter: str, device: torch.device): + self.parameters = parameters + self.device = device + self.target_letter = target_letter + self.shape_groups = shape_groups + self.faces = self.init_faces(device) + self.faces_roll_a = [torch.roll(self.faces[i], 1, 1) for i in range(len(self.faces))] + + with torch.no_grad(): + self.angles = [] + self.reset(device) + + def get_angles(self, points: torch.Tensor) -> torch.Tensor: + angles_ = [] + for i in range(len(self.faces)): + triangles = points[self.faces[i]] + triangles_roll_a = points[self.faces_roll_a[i]] + edges = triangles_roll_a - triangles + length = edges.norm(dim=-1) + edges = edges / (length + 1e-1)[:, :, None] + edges_roll = torch.roll(edges, 1, 1) + cosine = torch.einsum('ned,ned->ne', edges, edges_roll) + angles = torch.arccos(cosine) + angles_.append(angles) + return angles_ + + def get_letter_inds(self, letter_to_insert): + for group, l in zip(self.shape_groups, self.target_letter): + if l == letter_to_insert: + letter_inds = group.shape_ids + return letter_inds[0], letter_inds[-1], len(letter_inds) + + def reset(self, device): + points = torch.cat([point.to(device) for point in self.parameters]) + self.angles = self.get_angles(points) + + def init_faces(self, device: torch.device) -> torch.tensor: + faces_ = [] + for j, c in enumerate(self.target_letter): + points_np = [ + self.parameters[i].clone().detach().cpu().numpy() + for i in range(len(self.parameters)) + ] + start_ind, end_ind, shapes_per_letter = self.get_letter_inds(c) + print(c, "start_ind: ", start_ind.item(), ", end_ind: ", end_ind.item()) + holes = [] + if 
shapes_per_letter > 1: + holes = points_np[start_ind + 1:end_ind] + poly = Polygon(points_np[start_ind], holes=holes) + poly = poly.buffer(0) + points_np = np.concatenate(points_np) + faces = Delaunay(points_np).simplices + is_intersect = np.array([poly.contains(Point(points_np[face].mean(0))) for face in faces], dtype=bool) + faces_.append(torch.from_numpy(faces[is_intersect]).to(device, dtype=torch.int64)) + return faces_ + + def __call__(self) -> torch.Tensor: + loss_angles = 0 + points = torch.cat(self.parameters).to(self.device) + angles = self.get_angles(points) + for i in range(len(self.faces)): + loss_angles += (nnf.mse_loss(angles[i], self.angles[i])) + return loss_angles diff --git a/pytorch_svgrender/painter/wordasimage/painter_params.py b/pytorch_svgrender/painter/wordasimage/painter_params.py new file mode 100644 index 0000000000000000000000000000000000000000..924e612d2933b21e212357dab31d061b85483488 --- /dev/null +++ b/pytorch_svgrender/painter/wordasimage/painter_params.py @@ -0,0 +1,358 @@ +import os +import pathlib + +import numpy as np +import pydiffvg +import torch +from torch.optim.lr_scheduler import LambdaLR + +from pytorch_svgrender.diffvg_warp import DiffVGState +from .ttf import font_string_to_beziers, write_letter_svg + + +class Painter(DiffVGState): + + def __init__(self, + font: str, + canvas_size: int, + device: torch.device): + super(Painter, self).__init__(device=device, use_gpu=True, canvas_width=canvas_size, canvas_height=canvas_size) + self.font = font + + def init_shape(self, path_svg, seed=0): + assert pathlib.Path(path_svg).exists(), f"{path_svg} does not exist!" + print(f"-> init svg from `{path_svg}` ...") + # 1. load svg from path + canvas_width, canvas_height, self.shapes, self.shape_groups = self.load_svg(path_svg) + # 2. 
set learnable parameters + self.set_point_parameters() + + img = self.render_warp(seed) + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def get_image(self, step: int = 0): + img = self.render_warp(step) + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + img = img.unsqueeze(0) # convert img from HWC to NCHW + img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW + return img + + def clip_curve_shape(self): + for group in self.shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + + def set_point_parameters(self): # stroke`s location optimization + self.point_vars = [] + for i, path in enumerate(self.shapes): + path.points.requires_grad = True + self.point_vars.append(path.points) + + def get_point_parameters(self): + return self.point_vars + + def preprocess_font(self, word, letter, level_of_cc=1, font_path=None, init_path=None): + if level_of_cc == 0: + target_cp = None + else: + target_cp = {"A": 120, "B": 120, "C": 100, "D": 100, + "E": 120, "F": 120, "G": 120, "H": 120, + "I": 35, "J": 80, "K": 100, "L": 80, + "M": 100, "N": 100, "O": 100, "P": 120, + "Q": 120, "R": 130, "S": 110, "T": 90, + "U": 100, "V": 100, "W": 100, "X": 130, + "Y": 120, "Z": 120, + "a": 120, "b": 120, "c": 100, "d": 100, + "e": 120, "f": 120, "g": 120, "h": 120, + "i": 35, "j": 80, "k": 100, "l": 80, + "m": 100, "n": 100, "o": 100, "p": 120, + "q": 120, "r": 130, "s": 110, "t": 90, + "u": 100, "v": 100, "w": 100, "x": 130, + "y": 120, "z": 120} + target_cp = {k: v * level_of_cc for k, v in target_cp.items()} + + print("init_path: ", init_path) + + subdivision_thresh = None + self.font_string_to_svgs(init_path, + font_path, + word, + target_control=target_cp, + subdivision_thresh=subdivision_thresh) + self.normalize_letter_size(init_path, font_path, word) + + # optimize two adjacent letters + print("letter: ", letter) + if len(letter) > 1: + subdivision_thresh = None + self.font_string_to_svgs(init_path, + font_path, + letter, + target_control=target_cp, + subdivision_thresh=subdivision_thresh) + self.normalize_letter_size(init_path, font_path, letter) + + print("preprocess_font done.") + + def font_string_to_svgs(self, dest_path, font, txt, size=30, spacing=1.0, target_control=None, + subdivision_thresh=None): + fontname = self.font + glyph_beziers = font_string_to_beziers(font, txt, size, spacing, merge=False, target_control=target_control) + + # compute bounding box + points = np.vstack(sum(glyph_beziers, [])) + lt = np.min(points, axis=0) + rb = np.max(points, axis=0) + size = rb - lt + + sizestr = 'width="%.1f" height="%.1f"' % (size[0], size[1]) + boxstr = ' viewBox="%.1f %.1f %.1f %.1f"' % (lt[0], lt[1], size[0], size[1]) + header = '''<?xml version="1.0" encoding="utf-8"?> + <svg xmlns="http://www.w3.org/2000/svg" xmlns:ev="http://www.w3.org/2001/xml-events" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" baseProfile="full" ''' + header += sizestr + header += boxstr + header += '>\n<defs/>\n' + + svg_all = header + + for i, (c, beziers) in enumerate(zip(txt, glyph_beziers)): + fname, path = write_letter_svg(c, header, fontname, beziers, subdivision_thresh, dest_path) + + num_cp = self.count_cp(fname) + print(f"Total control point: {num_cp} -- {c}") + # 
Add to global svg + svg_all += path + '</g>\n' + + # Save global svg + svg_all += '</svg>\n' + fname = f"{dest_path}/{fontname}_{txt}.svg" + fname = fname.replace(" ", "_") + with open(fname, 'w') as f: + f.write(svg_all) + + def count_cp(self, file_name): + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(file_name) + p_counter = 0 + for path in shapes: + p_counter += path.points.shape[0] + return p_counter + + def normalize_letter_size(self, dest_path, font, txt): + fontname = os.path.splitext(os.path.basename(font))[0] + for i, c in enumerate(txt): + fname = f"{dest_path}/{fontname}_{c}.svg" + fname = fname.replace(" ", "_") + self.fix_single_svg(fname) + + fname = f"{dest_path}/{fontname}_{txt}.svg" + fname = fname.replace(" ", "_") + self.fix_single_svg(fname, all_word=True) + + def fix_single_svg(self, svg_path, all_word=False): + target_h_letter = 360 + target_canvas_width, target_canvas_height = 600, 600 + + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(svg_path) + + letter_h = canvas_height + letter_w = canvas_width + + if all_word: + if letter_w > letter_h: + scale_canvas_w = target_h_letter / letter_w + hsize = int(letter_h * scale_canvas_w) + scale_canvas_h = hsize / letter_h + else: + scale_canvas_h = target_h_letter / letter_h + wsize = int(letter_w * scale_canvas_h) + scale_canvas_w = wsize / letter_w + else: + scale_canvas_h = target_h_letter / letter_h + wsize = int(letter_w * scale_canvas_h) + scale_canvas_w = wsize / letter_w + + for num, p in enumerate(shapes): + p.points[:, 0] = p.points[:, 0] * scale_canvas_w + p.points[:, 1] = p.points[:, 1] * scale_canvas_h + target_h_letter + + w_min = min([torch.min(p.points[:, 0]) for p in shapes]) + w_max = max([torch.max(p.points[:, 0]) for p in shapes]) + h_min = min([torch.min(p.points[:, 1]) for p in shapes]) + h_max = max([torch.max(p.points[:, 1]) for p in shapes]) + + for num, p in enumerate(shapes): + p.points[:, 0] = p.points[:, 0] + (target_canvas_width / 2) - int(w_min + (w_max - w_min) / 2) + p.points[:, 1] = p.points[:, 1] + (target_canvas_height / 2) - int(h_min + (h_max - h_min) / 2) + + output_path = f"{svg_path[:-4]}_scaled.svg" + print("output_path: ", output_path) + self.save_svg(output_path, target_canvas_width, target_canvas_height, shapes, shape_groups) + + def combine_word(self, word, letter, font, results_dir): + word_svg_scaled = results_dir / f"{font}_{word}_scaled.svg" + canvas_width_word, canvas_height_word, shapes_word, shape_groups_word = pydiffvg.svg_to_scene(word_svg_scaled) + letter_ids = [] + for l in letter: + letter_ids += self.get_letter_ids(l, word, shape_groups_word) + + w_min, w_max = min([torch.min(shapes_word[ids].points[:, 0]) for ids in letter_ids]), max( + [torch.max(shapes_word[ids].points[:, 0]) for ids in letter_ids]) + h_min, h_max = min([torch.min(shapes_word[ids].points[:, 1]) for ids in letter_ids]), max( + [torch.max(shapes_word[ids].points[:, 1]) for ids in letter_ids]) + + c_w = (-w_min + w_max) / 2 + c_h = (-h_min + h_max) / 2 + + svg_result = results_dir / "final_letter.svg" + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(svg_result) + + out_w_min, out_w_max = min([torch.min(p.points[:, 0]) for p in shapes]), max( + [torch.max(p.points[:, 0]) for p in shapes]) + out_h_min, out_h_max = min([torch.min(p.points[:, 1]) for p in shapes]), max( + [torch.max(p.points[:, 1]) for p in shapes]) + + out_c_w = (-out_w_min + out_w_max) / 2 + out_c_h = (-out_h_min + out_h_max) / 2 + + scale_canvas_w = 
(w_max - w_min) / (out_w_max - out_w_min) + scale_canvas_h = (h_max - h_min) / (out_h_max - out_h_min) + + if scale_canvas_h > scale_canvas_w: + wsize = int((out_w_max - out_w_min) * scale_canvas_h) + scale_canvas_w = wsize / (out_w_max - out_w_min) + shift_w = -out_c_w * scale_canvas_w + c_w + else: + hsize = int((out_h_max - out_h_min) * scale_canvas_w) + scale_canvas_h = hsize / (out_h_max - out_h_min) + shift_h = -out_c_h * scale_canvas_h + c_h + + for num, p in enumerate(shapes): + p.points[:, 0] = p.points[:, 0] * scale_canvas_w + p.points[:, 1] = p.points[:, 1] * scale_canvas_h + if scale_canvas_h > scale_canvas_w: + p.points[:, 0] = p.points[:, 0] - out_w_min * scale_canvas_w + w_min + shift_w + p.points[:, 1] = p.points[:, 1] - out_h_min * scale_canvas_h + h_min + else: + p.points[:, 0] = p.points[:, 0] - out_w_min * scale_canvas_w + w_min + p.points[:, 1] = p.points[:, 1] - out_h_min * scale_canvas_h + h_min + shift_h + + for j, s in enumerate(letter_ids): + shapes_word[s] = shapes[j] + + word_letter_result = results_dir / f"{font}_{word}_{letter}.svg" + self.save_svg(word_letter_result, canvas_width, canvas_height, shapes_word, shape_groups_word) + + render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(canvas_width, + canvas_height, + shapes_word, + shape_groups_word) + img = render(canvas_width, canvas_height, 2, 2, 0, None, *scene_args) + img = img[:, :, 3:4] * img[:, :, :3] + \ + torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) + img = img[:, :, :3] + + word_letter_result = results_dir / f"{font}_{word}_{letter}.png" + self.save_image(img, word_letter_result) + + def get_letter_ids(self, letter, word, shape_groups): + for group, l in zip(shape_groups, word): + if l == letter: + return group.shape_ids + + def pretty_save_svg(self, filename, width=None, height=None, shapes=None, shape_groups=None): + width = self.canvas_width if width is None else width + height = self.canvas_height if height is None else height + shapes = self.shapes if shapes is None else shapes + shape_groups = self.shape_groups if shape_groups is None else shape_groups + + self.save_svg(filename, width, height, shapes, shape_groups, use_gamma=False, background=None) + + +class PainterOptimizer: + + def __init__(self, renderer, num_iter, lr_cfg): + self.renderer = renderer + self.num_iter = num_iter + self.lr_cfg = lr_cfg + self.lr_base = {'point': lr_cfg.point_lr} + + point_vars = self.renderer.get_point_parameters() + self.para = {'point': point_vars} + + self.optimizer = None + self.scheduler = None + + def init_optimizers(self): + # optimizer + learnable_params = [ + {'params': self.para[ki], 'lr': self.lr_base[ki]} for ki in sorted(self.para.keys()) + ] + self.optimizer = torch.optim.Adam(learnable_params, betas=(0.9, 0.9), eps=1e-6) + + # lr schedule + lr_lambda_fn = lambda step: learning_rate_decay( + step, + self.lr_cfg.lr_init, + self.lr_cfg.lr_final, + self.num_iter, + self.lr_cfg.lr_delay_steps, + self.lr_cfg.lr_delay_mult + ) / self.lr_cfg.lr_init + self.scheduler = LambdaLR(self.optimizer, lr_lambda=lr_lambda_fn, last_epoch=-1) + + def update_lr(self): + self.scheduler.step() + + def zero_grad_(self): + self.optimizer.zero_grad() + + def step_(self): + self.optimizer.step() + + def get_lr(self): + return self.optimizer.param_groups[0]['lr'] + + +def learning_rate_decay(step, + lr_init, + lr_final, + max_steps, + lr_delay_steps=0, + lr_delay_mult=1): + """ + Continuous learning rate decay function. 
+ The returned rate is lr_init when step=0 and lr_final when step=max_steps, and + is log-linearly interpolated elsewhere (equivalent to exponential decay). + If lr_delay_steps>0 then the learning rate will be scaled by some smooth + function of lr_delay_mult, such that the initial learning rate is + lr_init*lr_delay_mult at the beginning of optimization but will be eased back + to the normal learning rate when steps>lr_delay_steps. + + pytorch adaptation of https://github.com/google/mipnerf + + Args: + step: int, the current optimization step. + lr_init: float, the initial learning rate. + lr_final: float, the final learning rate. + max_steps: int, the number of steps during optimization. + lr_delay_steps: int, the number of steps to delay the full learning rate. + lr_delay_mult: float, the multiplier on the rate when delaying it. + Returns: + lr: the learning for current step 'step'. + """ + if lr_delay_steps > 0: + # A kind of reverse cosine decay. + delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin( + 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)) + else: + delay_rate = 1. + t = np.clip(step / max_steps, 0, 1) + log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t) + return delay_rate * log_lerp diff --git a/pytorch_svgrender/painter/wordasimage/ttf.py b/pytorch_svgrender/painter/wordasimage/ttf.py new file mode 100644 index 0000000000000000000000000000000000000000..c769db9ff034bd19cf2a9b6b86e97bbbad5cc4bc --- /dev/null +++ b/pytorch_svgrender/painter/wordasimage/ttf.py @@ -0,0 +1,114 @@ +import numpy as np +import freetype as ft + +from . import bezier + + +def glyph_to_cubics(face, x=0): + """Convert current font face glyph to cubic beziers""" + + def linear_to_cubic(Q): + a, b = Q + return [a + (b - a) * t for t in np.linspace(0, 1, 4)] + + def quadratic_to_cubic(Q): + return [Q[0], + Q[0] + (2 / 3) * (Q[1] - Q[0]), + Q[2] + (2 / 3) * (Q[1] - Q[2]), + Q[2]] + + beziers = [] + pt = lambda p: np.array([p.x + x, -p.y]) # Flipping here since freetype has y-up + last = lambda: beziers[-1][-1] + + def move_to(a, beziers): + beziers.append([pt(a)]) + + def line_to(a, beziers): + Q = linear_to_cubic([last(), pt(a)]) + beziers[-1] += Q[1:] + + def conic_to(a, b, beziers): + Q = quadratic_to_cubic([last(), pt(a), pt(b)]) + beziers[-1] += Q[1:] + + def cubic_to(a, b, c, beziers): + beziers[-1] += [pt(a), pt(b), pt(c)] + + face.glyph.outline.decompose(beziers, move_to=move_to, line_to=line_to, conic_to=conic_to, cubic_to=cubic_to) + beziers = [np.array(C).astype(float) for C in beziers] + return beziers + + +def font_string_to_beziers(font, txt, size=30, spacing=1.0, merge=True, target_control=None): + """ + Load a font and convert the outlines for a given string to cubic bezier curves, + if merge is True, simply return a list of all bezier curves, + otherwise return a list of lists with the bezier curves for each glyph + """ + + face = ft.Face(font) + face.set_char_size(64 * size) + slot = face.glyph + + x = 0 + beziers = [] + previous = 0 + for c in txt: + face.load_char(c, ft.FT_LOAD_DEFAULT | ft.FT_LOAD_NO_BITMAP) + bez = glyph_to_cubics(face, x) + + # Check number of control points if desired + if target_control is not None: + if c in target_control.keys(): + nctrl = np.sum([len(C) for C in bez]) + while nctrl < target_control[c]: + longest = np.max( + sum([[bezier.approx_arc_length(b) for b in bezier.chain_to_beziers(C)] for C in bez], [])) + thresh = longest * 0.5 + bez = [bezier.subdivide_bezier_chain(C, thresh) for C in bez] + nctrl = np.sum([len(C) for C in 
bez]) + print("nctrl: ", nctrl) + + if merge: + beziers += bez + else: + beziers.append(bez) + + kerning = face.get_kerning(previous, c) + x += (slot.advance.x + kerning.x) * spacing + previous = c + + return beziers + + +def bezier_chain_to_commands(C, closed=True): + curves = bezier.chain_to_beziers(C) + cmds = 'M %f %f ' % (C[0][0], C[0][1]) + n = len(curves) + for i, bez in enumerate(curves): + if i == n - 1 and closed: + cmds += 'C %f %f %f %f %f %fz ' % (*bez[1], *bez[2], *bez[3]) + else: + cmds += 'C %f %f %f %f %f %f ' % (*bez[1], *bez[2], *bez[3]) + return cmds + + +def write_letter_svg(c, header, fontname, beziers, subdivision_thresh, dest_path): + cmds = '' + svg = header + + path = '<g><path d="' + for C in beziers: + if subdivision_thresh is not None: + print('subd') + C = bezier.subdivide_bezier_chain(C, subdivision_thresh) + cmds += bezier_chain_to_commands(C, True) + path += cmds + '"/>\n' + svg += path + '</g></svg>\n' + + fname = f"{dest_path}/{fontname}_{c}.svg" + fname = fname.replace(" ", "_") + with open(fname, 'w') as f: + f.write(svg) + return fname, path diff --git a/pytorch_svgrender/pipelines/CLIPDraw_pipeline.py b/pytorch_svgrender/pipelines/CLIPDraw_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..2b98bb74bf16815cbe10b3112283ce0d09bce6ef --- /dev/null +++ b/pytorch_svgrender/pipelines/CLIPDraw_pipeline.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: +import torch +from tqdm.auto import tqdm +from torchvision import transforms +import clip + +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.painter.clipdraw import Painter, PainterOptimizer +from pytorch_svgrender.plt import plot_img, plot_couple + + +class CLIPDrawPipeline(ModelState): + + def __init__(self, args): + logdir_ = f"sd{args.seed}" \ + f"-im{args.x.image_size}" \ + f"-P{args.x.num_paths}" + super().__init__(args, log_path_suffix=logdir_) + + # create log dir + self.png_logs_dir = self.result_path / "png_logs" + self.svg_logs_dir = self.result_path / "svg_logs" + if self.accelerator.is_main_process: + self.png_logs_dir.mkdir(parents=True, exist_ok=True) + self.svg_logs_dir.mkdir(parents=True, exist_ok=True) + + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = self.result_path / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + + self.clip, self.tokenize_fn = self.init_clip() + + def init_clip(self): + model, _ = clip.load('ViT-B/32', self.device, jit=False) + return model, clip.tokenize + + def drawing_augment(self, image): + augment_trans = transforms.Compose([ + transforms.RandomPerspective(fill=1, p=1, distortion_scale=0.5), + transforms.RandomResizedCrop(224, scale=(0.7, 0.9)), + transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) + ]) + + # image augmentation transformation + img_augs = [] + for n in range(self.x_cfg.num_aug): + img_augs.append(augment_trans(image)) + im_batch = torch.cat(img_augs) + # clip visual encoding + image_features = self.clip.encode_image(im_batch) + + return image_features + + def painterly_rendering(self, prompt): + self.print(f"prompt: {prompt}") + + # text prompt encoding + text_tokenize = self.tokenize_fn(prompt).to(self.device) + with torch.no_grad(): + text_features = self.clip.encode_text(text_tokenize) + + # init SVG Painter + renderer = Painter(self.x_cfg, + self.args.diffvg, + 
num_strokes=self.x_cfg.num_paths, + canvas_size=self.x_cfg.image_size, + device=self.device) + img = renderer.init_image(stage=0) + self.print("init_image shape: ", img.shape) + plot_img(img, self.result_path, fname="init_img") + + # init painter optimizer + optimizer = PainterOptimizer(renderer, self.x_cfg.lr, self.x_cfg.width_lr, self.x_cfg.color_lr) + optimizer.init_optimizers() + + total_step = self.x_cfg.num_iter + with tqdm(initial=self.step, total=total_step, disable=not self.accelerator.is_main_process) as pbar: + while self.step < total_step: + rendering = renderer.get_image(self.step).to(self.device) + + if self.make_video and (self.step % self.args.framefreq == 0 or self.step == total_step - 1): + plot_img(rendering, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + # data augmentation + aug_svg_batch = self.drawing_augment(rendering) + + loss = torch.tensor(0., device=self.device) + for n in range(self.x_cfg.num_aug): + loss -= torch.cosine_similarity(text_features, aug_svg_batch[n:n + 1], dim=1).mean() + + pbar.set_description( + f"lr: {optimizer.get_lr():.3f}, " + f"L_train: {loss.item():.4f}" + ) + + # optimization + optimizer.zero_grad_() + loss.backward() + optimizer.step_() + + renderer.clip_curve_shape() + + if self.x_cfg.lr_schedule: + optimizer.update_lr(self.step) + + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + plot_couple(img, + rendering, + self.step, + prompt=prompt, + output_dir=self.png_logs_dir.as_posix(), + fname=f"iter{self.step}") + renderer.save_svg(self.svg_logs_dir.as_posix(), f"svg_iter{self.step}") + + self.step += 1 + pbar.update(1) + + renderer.save_svg(self.result_path.as_posix(), "final_svg") + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "clipdraw_rendering.mp4").as_posix() + ]) + + self.close(msg="painterly rendering complete.") diff --git a/pytorch_svgrender/pipelines/CLIPFont_pipeline.py b/pytorch_svgrender/pipelines/CLIPFont_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..0bca2c831d101bf18cdb764480abd8258559d1a3 --- /dev/null +++ b/pytorch_svgrender/pipelines/CLIPFont_pipeline.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: +from PIL import Image +from typing import AnyStr +import pathlib + +import torch +import torch.nn.functional as F +from torchvision import transforms +from tqdm.auto import tqdm +from svgutils.transform import fromfile + +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.plt import plot_img, plot_couple, plot_img_title +from pytorch_svgrender.painter.clipfont import (imagenet_templates, compose_text_with_templates, Painter, + PainterOptimizer) +from pytorch_svgrender.libs.metric.clip_score import CLIPScoreWrapper +from pytorch_svgrender.libs.metric.piq.perceptual import LPIPS + + +class CLIPFontPipeline(ModelState): + + def __init__(self, args): + logdir_ = f"sd{args.seed}" \ + f"-lpips{args.x.lam_lpips}-l2{args.x.lam_l2}" \ + f"{f'-{args.x.font.reinit_color}' if args.x.font.reinit else ''}" + super().__init__(args, log_path_suffix=logdir_) + + # create log dir + self.png_logs_dir = self.result_path / "png_logs" + self.svg_logs_dir = self.result_path / "svg_logs" + if self.accelerator.is_main_process: + self.png_logs_dir.mkdir(parents=True, exist_ok=True) + self.svg_logs_dir.mkdir(parents=True, exist_ok=True) + + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = self.result_path / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + + # init clip model + self.clip_wrapper = CLIPScoreWrapper(self.x_cfg.clip.model_name, device=self.device) + # init LPIPS + self.lam_lpips = 0 if self.x_cfg.get('lam_lpips', None) is None else self.x_cfg.lam_lpips + self.lpips_fn = LPIPS() + # l2 + self.lam_l2 = 0 if self.x_cfg.get('lam_l2', None) is None else self.x_cfg.lam_l2 + + def load_target_file(self, tar_path: AnyStr, image_size: int = 224): + process_comp = transforms.Compose([ + transforms.Resize(size=(image_size, image_size)), + transforms.ToTensor(), + transforms.Lambda(lambda t: t.unsqueeze(0)), + ]) + + tar_pil = Image.open(tar_path).convert("RGB") # open file + target_img = process_comp(tar_pil) # preprocess + return target_img.to(self.device) + + def cropper(self, x: torch.Tensor) -> torch.Tensor: + return transforms.RandomCrop(self.x_cfg.crop_size)(x) + + def padding_cropper(self, x: torch.Tensor) -> torch.Tensor: + return transforms.RandomCrop(size=500, padding=100, fill=255, padding_mode='constant')(x) + + def affine_to512(self, x: torch.Tensor) -> torch.Tensor: + comp = transforms.Compose([ + transforms.RandomPerspective(fill=0, p=1, distortion_scale=0.3), + transforms.Resize(512) + ]) + return comp(x) + + def resize224_norm(self, x: torch.Tensor) -> torch.Tensor: + x = torch.nn.functional.interpolate(x, size=224, mode='bicubic') + return self.clip_wrapper.norm_(x) + + def painterly_rendering(self, svg_path, prompt): + svg_path = pathlib.Path(svg_path) + assert svg_path.exists(), f"'{svg_path}' is not exist." 
+ + # load renderer + renderer = self.load_renderer() + + # rescale svg + fig = fromfile(svg_path.as_posix()) + fig.set_size(('512', '512')) + filename = str(svg_path.name).split('.')[0] + svg_path = self.result_path / f'{filename}_scale.svg' + fig.save(svg_path.as_posix()) + + # init shapes and shape groups + init_img = renderer.init_shapes(svg_path.as_posix(), reinit_cfg=self.x_cfg.font) + self.print("init_image shape: ", init_img.shape) + plot_img(init_img, self.result_path, fname="init_img") + + # load init file + with torch.no_grad(): + source_image = self.load_target_file(self.result_path / 'init_img.png', image_size=512) + source_image = source_image.detach() + source_image_feats = self.clip_wrapper.encode_image(self.resize224_norm(source_image)).detach() + + # build optimizer + optimizer = PainterOptimizer(renderer, self.x_cfg.lr_base) + optimizer.init_optimizers() + + # pre-calc + with torch.no_grad(): + # encode text prompt and source prompt + template_text = compose_text_with_templates(prompt, imagenet_templates) + text_features = self.clip_wrapper.encode_text(template_text).detach() + source = "A photo" + template_source = compose_text_with_templates(source, imagenet_templates) + text_source = self.clip_wrapper.encode_text(template_source).detach() + + total_step = self.x_cfg.num_iter + with tqdm(initial=self.step, total=total_step, disable=not self.accelerator.is_main_process) as pbar: + while self.step < total_step: + img_t = renderer.get_image().to(self.device) + + if self.make_video and (self.step % self.args.framefreq == 0 or self.step == total_step - 1): + plot_img(img_t, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + # style loss + # directional loss 1 + img_proc = [] + for n in range(self.x_cfg.num_crops): + target_crop = self.cropper(img_t) + target_crop = self.affine_to512(target_crop) + img_proc.append(target_crop) + img_aug = torch.cat(img_proc, dim=0) + image_features = self.clip_wrapper.encode_image(self.resize224_norm(img_aug)) + + loss_patch = self.x_cfg.lam_patch * self.clip_wrapper.directional_loss(text_source, + source_image_feats, + text_features, + image_features, + self.x_cfg.thresh) + + # directional loss 2 + img_proc2 = [] + for n in range(32): + target_crop = self.padding_cropper(img_t) + target_crop = self.affine_to512(target_crop) + img_proc2.append(target_crop) + img_aug2 = torch.cat(img_proc2, dim=0) + glob_features = self.clip_wrapper.encode_image(self.resize224_norm(img_aug2)) + + loss_glob = self.x_cfg.lam_dir * self.clip_wrapper.directional_loss(text_source, + source_image_feats, + text_features, glob_features) + + # LPIPS + loss_lpips = self.lam_lpips * self.lpips_fn(img_t, source_image) + + # L2 + loss_l2 = self.lam_l2 * F.mse_loss(img_t, source_image) + + # total loss + loss = loss_patch + loss_glob + loss_lpips + loss_l2 + + # log + p_lr, c_lr = optimizer.get_lr() + pbar.set_description( + f"point_lr: {p_lr}, color_lr: {c_lr}, " + f"L_total: {loss.item():.4f}, " + f"L_patch: {loss_patch.item():.4f}, " + f"L_glob: {loss_glob.item():.4f}, " + f"L_lpips: {loss_lpips.item():.4f}, " + f"L_l2: {loss_l2.item():.4f}." 
+ ) + + # backward and optimization + optimizer.zero_grad_() + loss.backward() + optimizer.step_() + + renderer.clip_curve_shape() + + if self.x_cfg.lr_schedule: + optimizer.update_lr(self.step) + + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + plot_couple(init_img, + img_t, + self.step, + output_dir=self.png_logs_dir.as_posix(), + fname=f"iter{self.step}") + renderer.pretty_save_svg(self.svg_logs_dir / f"svg_iter{self.step}.svg") + + self.step += 1 + pbar.update(1) + + # log final results + renderer.pretty_save_svg(self.result_path / "final_svg.svg") + final_raster_sketch = renderer.get_image().to(self.device) + plot_img_title(final_raster_sketch, + title=f'final result - {self.step} step', + output_dir=self.result_path, + fname='final_render') + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "clipfont_rendering.mp4").as_posix() + ]) + + self.close(msg="painterly rendering complete.") + + def load_renderer(self): + renderer = Painter(device=self.device) + return renderer diff --git a/pytorch_svgrender/pipelines/CLIPascene_pipeline.py b/pytorch_svgrender/pipelines/CLIPascene_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..d7e4f6384b308d211ee0634fa788a80566618525 --- /dev/null +++ b/pytorch_svgrender/pipelines/CLIPascene_pipeline.py @@ -0,0 +1,287 @@ +import shutil +from pathlib import Path + +import imageio +import numpy as np +import torch +from PIL import Image +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.painter.clipascene import Painter, PainterOptimizer, Loss +from pytorch_svgrender.painter.clipascene.lama_utils import apply_inpaint +from pytorch_svgrender.painter.clipascene.scripts_utils import read_svg +from pytorch_svgrender.painter.clipascene.sketch_utils import plot_attn, get_mask_u2net, fix_image_scale +from pytorch_svgrender.plt import plot_img, plot_couple +from skimage.transform import resize +from torchvision import transforms +from torchvision.transforms import InterpolationMode +from tqdm.auto import tqdm + + +class CLIPascenePipeline(ModelState): + def __init__(self, args): + logdir_ = f"sd{args.seed}" \ + f"-im{args.x.image_size}" \ + f"-P{args.x.num_paths}W{args.x.width}" + super().__init__(args, log_path_suffix=logdir_) + + def painterly_rendering(self, image_path): + foreground_target, background_target = self.preprocess_image(image_path) + background_output_dir = self.run_background(background_target) + foreground_output_dir = self.run_foreground(foreground_target) + self.combine(background_output_dir, foreground_output_dir, self.device) + self.close(msg="painterly rendering complete.") + + def preprocess_image(self, image_path): + image_path = Path(image_path) + scene_path = self.result_path / "scene" + background_path = self.result_path / "background" + if self.accelerator.is_main_process: + scene_path.mkdir(parents=True, exist_ok=True) + background_path.mkdir(parents=True, exist_ok=True) + + im = Image.open(image_path) + max_size = max(im.size[0], im.size[1]) + scaled_path = scene_path / f"{image_path.stem}.png" + if max_size > 512: + im = Image.open(image_path).convert("RGB").resize((512, 512)) + im.save(scaled_path) + else: + shutil.copyfile(image_path, scaled_path) + + scaled_img = Image.open(scaled_path) + mask = get_mask_u2net(scaled_img, scene_path, self.args.x.u2net_path, preprocess=True, 
device=self.device) + masked_path = scene_path / f"{image_path.stem}_mask.png" + imageio.imsave(masked_path, mask) + + apply_inpaint(scene_path, background_path, self.device) + return scaled_path, background_path / f"{image_path.stem}_mask.png" + + def run_background(self, target_file): + print("=====Start background=====") + self.args.x.resize_obj = 0 + self.args.x.mask_object = 0 + + clip_conv_layer_weights_int = [0 for _ in range(12)] + clip_conv_layer_weights_int[self.args.x.background_layer] = 1 + clip_conv_layer_weights_str = [str(j) for j in clip_conv_layer_weights_int] + self.args.x.clip_conv_layer_weights = ','.join(clip_conv_layer_weights_str) + + output_dir = self.result_path / "background" + if self.accelerator.is_main_process: + output_dir.mkdir(parents=True, exist_ok=True) + self.paint(target_file, output_dir, self.args.x.background_num_iter) + print("=====End background=====") + return output_dir + + def run_foreground(self, target_file): + print("=====Start foreground=====") + self.args.x.resize_obj = 1 + if self.args.x.foreground_layer != 4: + self.args.x.gradnorm = 1 + self.args.x.mask_object = 1 + + clip_conv_layer_weights_int = [0 for _ in range(12)] + clip_conv_layer_weights_int[4] = 0.5 + clip_conv_layer_weights_int[self.args.x.foreground_layer] = 1 + clip_conv_layer_weights_str = [str(j) for j in clip_conv_layer_weights_int] + self.args.x.clip_conv_layer_weights = ','.join(clip_conv_layer_weights_str) + + output_dir = self.result_path / "object" + if self.accelerator.is_main_process: + output_dir.mkdir(parents=True, exist_ok=True) + self.paint(target_file, output_dir, self.args.x.foreground_num_iter) + print("=====End foreground=====") + return output_dir + + def paint(self, target, output_dir, num_iter): + png_log_dir = output_dir / "png_logs" + svg_log_dir = output_dir / "svg_logs" + if self.accelerator.is_main_process: + png_log_dir.mkdir(parents=True, exist_ok=True) + svg_log_dir.mkdir(parents=True, exist_ok=True) + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = output_dir / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + # preprocess input image + inputs, mask = self.get_target(target, + self.args.x.image_size, + output_dir, + self.args.x.resize_obj, + self.args.x.u2net_path, + self.args.x.mask_object, + self.args.x.fix_scale, + self.device) + plot_img(inputs, output_dir, fname="target") + loss_func = Loss(self.x_cfg, mask, self.device) + # init renderer + renderer = self.load_renderer(inputs, mask) + + # init optimizer + optimizer = PainterOptimizer(self.x_cfg, renderer) + best_loss, best_fc_loss, best_num_strokes = 100, 100, self.args.x.num_paths + best_iter, best_iter_fc = 0, 0 + min_delta = 1e-7 + renderer.set_random_noise(0) + renderer.init_image(stage=0) + renderer.save_svg(svg_log_dir, "init_svg") + optimizer.init_optimizers() + + if self.args.x.switch_loss: + # start with width optim and than switch every switch_loss iterations + renderer.turn_off_points_optim() + optimizer.turn_off_points_optim() + + with torch.no_grad(): + renderer.get_image("init").to(self.device) + renderer.save_svg(self.result_path, "init") + + total_step = num_iter + step = 0 + with tqdm(initial=step, total=total_step, disable=not self.accelerator.is_main_process) as pbar: + while step < total_step: + optimizer.zero_grad_() + sketches = renderer.get_image().to(self.device) + if self.make_video and (step % self.args.framefreq == 0 or step == total_step - 1): + plot_img(sketches, 
self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + losses_dict_weighted, _, _ = loss_func(sketches, inputs.detach(), step, + renderer.get_widths(), renderer, + optimizer, mode="train", + width_opt=renderer.width_optim) + loss = sum(list(losses_dict_weighted.values())) + loss.backward() + optimizer.step_() + + if step % self.args.x.save_step == 0: + plot_couple(inputs, + sketches, + self.step, + output_dir=png_log_dir.as_posix(), + fname=f"iter{step}") + renderer.save_svg(svg_log_dir.as_posix(), f"svg_iter{step}") + + if step % self.args.x.eval_step == 0: + with torch.no_grad(): + losses_dict_weighted_eval, _, _ = loss_func( + sketches, + inputs, + step, + renderer.get_widths(), + renderer=renderer, + mode="eval", + width_opt=renderer.width_optim) + loss_eval = sum(list(losses_dict_weighted_eval.values())) + + cur_delta = loss_eval.item() - best_loss + if abs(cur_delta) > min_delta: + if cur_delta < 0: + best_loss = loss_eval.item() + best_iter = step + plot_couple(inputs, + sketches, + best_iter, + output_dir=output_dir.as_posix(), + fname="best_iter") + renderer.save_svg(output_dir.as_posix(), "best_iter") + + if step == 0 and self.x_cfg.attention_init and self.accelerator.is_main_process: + plot_attn(renderer.get_attn(), + renderer.get_thresh(), + inputs, + renderer.get_inds(), + (output_dir / "attention_map.png").as_posix(), + self.x_cfg.saliency_model) + + if self.args.x.switch_loss: + if step > 0 and step % self.args.x.switch_loss == 0: + renderer.switch_opt() + optimizer.switch_opt() + + step += 1 + pbar.update(1) + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (output_dir / f"clipascene_sketch.mp4").as_posix() + ]) + + def load_renderer(self, target_im=None, mask=None): + renderer = Painter(method_cfg=self.x_cfg, + diffvg_cfg=self.args.diffvg, + num_strokes=self.x_cfg.num_paths, + canvas_size=self.x_cfg.image_size, + device=self.device, + target_im=target_im, + mask=mask) + return renderer + + def get_target(self, + target_file, + image_size, + output_dir, + resize_obj, + u2net_path, + mask_object, + fix_scale, + device): + + target = Image.open(target_file) + + if target.mode == "RGBA": + # Create a white rgba background + new_image = Image.new("RGBA", target.size, "WHITE") + # Paste the image on the background. 
+ new_image.paste(target, (0, 0), target) + target = new_image + target = target.convert("RGB") + + # U^2 net mask + masked_im, mask = get_mask_u2net(target, output_dir, u2net_path, resize_obj=resize_obj, device=device) + if mask_object: + target = masked_im + + if fix_scale: + target = fix_image_scale(target) + + transforms_ = [] + if target.size[0] != target.size[1]: + transforms_.append( + transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC) + ) + else: + transforms_.append(transforms.Resize(image_size, interpolation=InterpolationMode.BICUBIC)) + transforms_.append(transforms.CenterCrop(image_size)) + + transforms_.append(transforms.ToTensor()) + data_transforms = transforms.Compose(transforms_) + target_ = data_transforms(target).unsqueeze(0).to(self.device) + return target_, mask + + def combine(self, background_output_dir, foreground_output_dir, device, output_size=448): + params_path = foreground_output_dir / "resize_params.npy" + params = None + if params_path.exists(): + params = np.load(params_path, allow_pickle=True)[()] + mask_path = foreground_output_dir / "mask.png" + mask = imageio.imread(mask_path) + mask = resize(mask, (output_size, output_size), anti_aliasing=False) + + object_svg_path = foreground_output_dir / "best_iter.svg" + raster_o = read_svg(object_svg_path, resize_obj=1, params=params, multiply=1.8, device=device) + + background_svg_path = background_output_dir / "best_iter.svg" + raster_b = read_svg(background_svg_path, resize_obj=0, params=params, multiply=1.8, device=device) + + raster_b[mask == 1] = 1 + raster_b[raster_o != 1] = raster_o[raster_o != 1] + raster_b = torch.from_numpy(raster_b).unsqueeze(0).permute(0, 3, 1, 2).to(device) + plot_img(raster_b, self.result_path, fname="combined") diff --git a/pytorch_svgrender/pipelines/CLIPasso_pipeline.py b/pytorch_svgrender/pipelines/CLIPasso_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..0166e35f46e5a3b3e4037a537ace94199800e898 --- /dev/null +++ b/pytorch_svgrender/pipelines/CLIPasso_pipeline.py @@ -0,0 +1,214 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: +from PIL import Image + +import torch +from tqdm.auto import tqdm +from torchvision import transforms +from torchvision.transforms import InterpolationMode +from torchvision.datasets.folder import is_image_file + +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.painter.clipasso import Painter, PainterOptimizer, Loss +from pytorch_svgrender.painter.clipasso.sketch_utils import plot_attn, get_mask_u2net, fix_image_scale +from pytorch_svgrender.plt import plot_img, plot_couple, plot_img_title + + +class CLIPassoPipeline(ModelState): + + def __init__(self, args): + logdir_ = f"sd{args.seed}" \ + f"-im{args.x.image_size}" \ + f"{'-mask' if args.x.mask_object else ''}" \ + f"{'-XDoG' if args.x.xdog_intersec else ''}" \ + f"-P{args.x.num_paths}W{args.x.width}{'OP' if args.x.force_sparse else 'BL'}" \ + f"-tau{args.x.softmax_temp}" + super().__init__(args, log_path_suffix=logdir_) + + # create log dir + self.png_logs_dir = self.result_path / "png_logs" + self.svg_logs_dir = self.result_path / "svg_logs" + if self.accelerator.is_main_process: + self.png_logs_dir.mkdir(parents=True, exist_ok=True) + self.svg_logs_dir.mkdir(parents=True, exist_ok=True) + + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = self.result_path / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + + def painterly_rendering(self, image_path): + loss_func = Loss(self.x_cfg, self.device) + + # preprocess input image + inputs, mask = self.get_target(image_path, + self.x_cfg.image_size, + self.result_path, + self.x_cfg.u2net_path, + self.x_cfg.mask_object, + self.x_cfg.fix_scale, + self.device) + plot_img(inputs, self.result_path, fname="input") + + # init renderer + renderer = self.load_renderer(inputs, mask) + img = renderer.init_image(stage=0) + self.print("init_image shape: ", img.shape) + plot_img(img, self.result_path, fname="init_img") + + # init optimizer + optimizer = PainterOptimizer(renderer, + self.x_cfg.num_iter, + self.x_cfg.lr, + self.x_cfg.force_sparse, self.x_cfg.color_lr) + optimizer.init_optimizers() + + best_loss, best_fc_loss = 100, 100 + min_delta = 1e-5 + total_step = self.x_cfg.num_iter + + with tqdm(initial=self.step, total=total_step, disable=not self.accelerator.is_main_process) as pbar: + while self.step < total_step: + sketches = renderer.get_image().to(self.device) + + if self.make_video and (self.step % self.args.framefreq == 0 or self.step == total_step - 1): + plot_img(sketches, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + losses_dict = loss_func(sketches, + inputs.detach(), + renderer.get_color_parameters(), + renderer, + self.step, + optimizer) + loss = sum(list(losses_dict.values())) + + optimizer.zero_grad_() + loss.backward() + optimizer.step_() + + if self.x_cfg.lr_schedule: + optimizer.update_lr() + + pbar.set_description(f"L_train: {loss.item():.5f}") + + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + plot_couple(inputs, + sketches, + self.step, + output_dir=self.png_logs_dir.as_posix(), + fname=f"iter{self.step}") + renderer.save_svg(self.svg_logs_dir.as_posix(), f"svg_iter{self.step}") + + if self.step % self.args.eval_step == 0 and self.accelerator.is_main_process: + with torch.no_grad(): + losses_dict_eval = loss_func( + sketches, + inputs, + renderer.get_color_parameters(), + renderer.get_point_parameters(), + self.step, + optimizer, + mode="eval" + ) + loss_eval = 
sum(list(losses_dict_eval.values())) + + cur_delta = loss_eval.item() - best_loss + if abs(cur_delta) > min_delta and cur_delta < 0: + best_loss = loss_eval.item() + best_iter = self.step + plot_couple(inputs, + sketches, + best_iter, + output_dir=self.result_path.as_posix(), + fname="best_iter") + renderer.save_svg(self.result_path.as_posix(), "best_iter") + + if self.step == 0 and self.x_cfg.attention_init and self.accelerator.is_main_process: + plot_attn(renderer.get_attn(), + renderer.get_thresh(), + inputs, + renderer.get_inds(), + (self.result_path / "attention_map.png").as_posix(), + self.x_cfg.saliency_model) + + self.step += 1 + pbar.update(1) + + # log final results + renderer.save_svg(self.result_path.as_posix(), "final_svg") + final_raster_sketch = renderer.get_image().to(self.device) + plot_img_title(final_raster_sketch, + title=f'final result - {self.step} step', + output_dir=self.result_path, + fname='final_render') + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "clipasso_rendering.mp4").as_posix() + ]) + + self.close(msg="painterly rendering complete.") + + def load_renderer(self, target_im=None, mask=None): + renderer = Painter(method_cfg=self.x_cfg, + diffvg_cfg=self.args.diffvg, + num_strokes=self.x_cfg.num_paths, + canvas_size=self.x_cfg.image_size, + device=self.device, + target_im=target_im, + mask=mask) + return renderer + + def get_target(self, + target_file, + image_size, + output_dir, + u2net_path, + mask_object, + fix_scale, + device): + if not is_image_file(target_file): + raise TypeError(f"{target_file} is not image file.") + target = Image.open(target_file) + + if target.mode == "RGBA": + # Create a white rgba background + new_image = Image.new("RGBA", target.size, "WHITE") + # Paste the image on the background. + new_image.paste(target, (0, 0), target) + target = new_image + target = target.convert("RGB") + + # U^2 net mask + masked_im, mask = get_mask_u2net(target, output_dir, u2net_path, device) + if mask_object: + target = masked_im + + if fix_scale: + target = fix_image_scale(target) + + transforms_ = [] + if target.size[0] != target.size[1]: + transforms_.append( + transforms.Resize((image_size, image_size), + interpolation=InterpolationMode.BICUBIC) + ) + else: + transforms_.append(transforms.Resize(image_size, + interpolation=InterpolationMode.BICUBIC)) + transforms_.append(transforms.CenterCrop(image_size)) + + transforms_.append(transforms.ToTensor()) + data_transforms = transforms.Compose(transforms_) + target_ = data_transforms(target).unsqueeze(0).to(self.device) + return target_, mask diff --git a/pytorch_svgrender/pipelines/DiffSketcher_pipeline.py b/pytorch_svgrender/pipelines/DiffSketcher_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..bda66e891418a153d4c856be55c9bd82b250fd5a --- /dev/null +++ b/pytorch_svgrender/pipelines/DiffSketcher_pipeline.py @@ -0,0 +1,501 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: +import pathlib +from PIL import Image +from functools import partial + +import torch +import torch.nn.functional as F +from torchvision import transforms +from torchvision.datasets.folder import is_image_file +from tqdm.auto import tqdm +import numpy as np +from skimage.color import rgb2gray +import diffusers + +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.libs.metric.lpips_origin import LPIPS +from pytorch_svgrender.libs.metric.piq.perceptual import DISTS as DISTS_PIQ +from pytorch_svgrender.libs.metric.clip_score import CLIPScoreWrapper +from pytorch_svgrender.painter.diffsketcher import ( + Painter, SketchPainterOptimizer, Token2AttnMixinASDSPipeline, Token2AttnMixinASDSSDXLPipeline) +from pytorch_svgrender.plt import plot_img, plot_couple +from pytorch_svgrender.painter.diffsketcher.sketch_utils import plt_attn +from pytorch_svgrender.painter.clipasso.sketch_utils import get_mask_u2net, fix_image_scale +from pytorch_svgrender.painter.diffsketcher.stroke_pruning import paths_pruning +from pytorch_svgrender.token2attn.attn_control import AttentionStore, EmptyControl +from pytorch_svgrender.token2attn.ptp_utils import view_images +from pytorch_svgrender.diffusers_warp import init_StableDiffusion_pipeline, model2res + + +class DiffSketcherPipeline(ModelState): + + def __init__(self, args): + attn_log_ = "" + if args.x.attention_init: + attn_log_ = f"-tk{args.x.token_ind}" \ + f"{'-XDoG' if args.x.xdog_intersec else ''}" \ + f"-atc{args.x.attn_coeff}-tau{args.x.softmax_temp}" + logdir_ = f"sd{args.seed}-im{args.x.image_size}" \ + f"-P{args.x.num_paths}W{args.x.width}{'OP' if args.x.optim_opacity else 'BL'}" \ + f"{attn_log_}" + super().__init__(args, log_path_suffix=logdir_) + + # create log dir + self.png_logs_dir = self.result_path / "png_logs" + self.svg_logs_dir = self.result_path / "svg_logs" + self.attn_logs_dir = self.result_path / "attn_logs" + if self.accelerator.is_main_process: + self.png_logs_dir.mkdir(parents=True, exist_ok=True) + self.svg_logs_dir.mkdir(parents=True, exist_ok=True) + self.attn_logs_dir.mkdir(parents=True, exist_ok=True) + + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = self.result_path / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + + if self.x_cfg.model_id == "sdxl": + # default LSDSSDXLPipeline scheduler is EulerDiscreteScheduler + # when LSDSSDXLPipeline calls, scheduler.timesteps will change in step 4 + # which causes problem in sds add_noise() function + # because the random t may not in scheduler.timesteps + custom_pipeline = Token2AttnMixinASDSSDXLPipeline + custom_scheduler = diffusers.DPMSolverMultistepScheduler + self.x_cfg.cross_attn_res = self.x_cfg.cross_attn_res * 2 + elif self.x_cfg.model_id == 'sd21': + custom_pipeline = Token2AttnMixinASDSPipeline + custom_scheduler = diffusers.DDIMScheduler + else: # sd14, sd15 + custom_pipeline = Token2AttnMixinASDSPipeline + custom_scheduler = diffusers.DDIMScheduler + + self.diffusion = init_StableDiffusion_pipeline( + self.x_cfg.model_id, + custom_pipeline=custom_pipeline, + custom_scheduler=custom_scheduler, + device=self.device, + local_files_only=not args.diffuser.download, + force_download=args.diffuser.force_download, + resume_download=args.diffuser.resume_download, + ldm_speed_up=self.x_cfg.ldm_speed_up, + enable_xformers=self.x_cfg.enable_xformers, + gradient_checkpoint=self.x_cfg.gradient_checkpoint, + ) + + self.g_device = 
torch.Generator(device=self.device).manual_seed(args.seed) + + # init clip model and clip score wrapper + self.cargs = self.x_cfg.clip + self.clip_score_fn = CLIPScoreWrapper(self.cargs.model_name, + device=self.device, + visual_score=True, + feats_loss_type=self.cargs.feats_loss_type, + feats_loss_weights=self.cargs.feats_loss_weights, + fc_loss_weight=self.cargs.fc_loss_weight) + + def load_render(self, target_img, attention_map, mask=None): + renderer = Painter(self.x_cfg, + self.args.diffvg, + num_strokes=self.x_cfg.num_paths, + num_segments=self.x_cfg.num_segments, + canvas_size=self.x_cfg.image_size, + device=self.device, + target_im=target_img, + attention_map=attention_map, + mask=mask) + return renderer + + def extract_ldm_attn(self, prompts): + # init controller + controller = AttentionStore() if self.x_cfg.attention_init else EmptyControl() + + height = width = model2res(self.x_cfg.model_id) + outputs = self.diffusion(prompt=[prompts], + negative_prompt=self.args.neg_prompt, + height=height, + width=width, + controller=controller, + num_inference_steps=self.x_cfg.num_inference_steps, + guidance_scale=self.x_cfg.guidance_scale, + generator=self.g_device) + + target_file = self.result_path / "ldm_generated_image.png" + view_images([np.array(img) for img in outputs.images], save_image=True, fp=target_file) + + if self.x_cfg.attention_init: + """ldm cross-attention map""" + cross_attention_maps, tokens = \ + self.diffusion.get_cross_attention([prompts], + controller, + res=self.x_cfg.cross_attn_res, + from_where=("up", "down"), + save_path=self.result_path / "cross_attn.png") + + self.print(f"the length of tokens is {len(tokens)}, select {self.x_cfg.token_ind}-th token") + # [res, res, seq_len] + self.print(f"origin cross_attn_map shape: {cross_attention_maps.shape}") + # [res, res] + cross_attn_map = cross_attention_maps[:, :, self.x_cfg.token_ind] + self.print(f"select cross_attn_map shape: {cross_attn_map.shape}\n") + cross_attn_map = 255 * cross_attn_map / cross_attn_map.max() + # [res, res, 3] + cross_attn_map = cross_attn_map.unsqueeze(-1).expand(*cross_attn_map.shape, 3) + # [3, res, res] + cross_attn_map = cross_attn_map.permute(2, 0, 1).unsqueeze(0) + # [3, clip_size, clip_size] + cross_attn_map = F.interpolate(cross_attn_map, size=self.x_cfg.image_size, mode='bicubic') + cross_attn_map = torch.clamp(cross_attn_map, min=0, max=255) + # rgb to gray + cross_attn_map = rgb2gray(cross_attn_map.squeeze(0).permute(1, 2, 0)).astype(np.float32) + # torch to numpy + if cross_attn_map.shape[-1] != self.x_cfg.image_size and cross_attn_map.shape[-2] != self.x_cfg.image_size: + cross_attn_map = cross_attn_map.reshape(self.x_cfg.image_size, self.x_cfg.image_size) + # to [0, 1] + cross_attn_map = (cross_attn_map - cross_attn_map.min()) / (cross_attn_map.max() - cross_attn_map.min()) + + """ldm self-attention map""" + self_attention_maps, svd, vh_ = \ + self.diffusion.get_self_attention_comp([prompts], + controller, + res=self.x_cfg.self_attn_res, + from_where=("up", "down"), + img_size=self.x_cfg.image_size, + max_com=self.x_cfg.max_com, + save_path=self.result_path) + + # comp self-attention map + if self.x_cfg.mean_comp: + self_attn = np.mean(vh_, axis=0) + self.print(f"use the mean of {self.x_cfg.max_com} comps.") + else: + self_attn = vh_[self.x_cfg.comp_idx] + self.print(f"select {self.x_cfg.comp_idx}-th comp.") + # to [0, 1] + self_attn = (self_attn - self_attn.min()) / (self_attn.max() - self_attn.min()) + # visual final self-attention + self_attn_vis = np.copy(self_attn) + 
self_attn_vis = self_attn_vis * 255 + self_attn_vis = np.repeat(np.expand_dims(self_attn_vis, axis=2), 3, axis=2).astype(np.uint8) + view_images(self_attn_vis, save_image=True, fp=self.result_path / "self-attn-final.png") + + """attention map fusion""" + attn_map = self.x_cfg.attn_coeff * cross_attn_map + (1 - self.x_cfg.attn_coeff) * self_attn + # to [0, 1] + attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min()) + + self.print(f"-> fusion attn_map: {attn_map.shape}") + else: + attn_map = None + + return target_file.as_posix(), attn_map + + @property + def clip_norm_(self): + return transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) + + def clip_pair_augment(self, + x: torch.Tensor, + y: torch.Tensor, + im_res: int, + augments: str = "affine_norm", + num_aug: int = 4): + # init augmentations + augment_list = [] + if "affine" in augments: + augment_list.append( + transforms.RandomPerspective(fill=0, p=1.0, distortion_scale=0.5) + ) + augment_list.append( + transforms.RandomResizedCrop(im_res, scale=(0.8, 0.8), ratio=(1.0, 1.0)) + ) + augment_list.append(self.clip_norm_) # CLIP Normalize + + # compose augmentations + augment_compose = transforms.Compose(augment_list) + # make augmentation pairs + x_augs, y_augs = [self.clip_score_fn.normalize(x)], [self.clip_score_fn.normalize(y)] + # repeat N times + for n in range(num_aug): + augmented_pair = augment_compose(torch.cat([x, y])) + x_augs.append(augmented_pair[0].unsqueeze(0)) + y_augs.append(augmented_pair[1].unsqueeze(0)) + xs = torch.cat(x_augs, dim=0) + ys = torch.cat(y_augs, dim=0) + return xs, ys + + def painterly_rendering(self, prompt): + # log prompts + self.print(f"prompt: {prompt}") + self.print(f"negative_prompt: {self.args.neg_prompt}\n") + + # init attention + target_file, attention_map = self.extract_ldm_attn(prompt) + + timesteps_ = self.diffusion.scheduler.timesteps.cpu().numpy().tolist() + self.print(f"{len(timesteps_)} denoising steps, {timesteps_}") + + perceptual_loss_fn = None + if self.x_cfg.perceptual.coeff > 0: + if self.x_cfg.perceptual.name == "lpips": + lpips_loss_fn = LPIPS(net=self.x_cfg.perceptual.lpips_net).to(self.device) + perceptual_loss_fn = partial(lpips_loss_fn.forward, return_per_layer=False, normalize=False) + elif self.x_cfg.perceptual.name == "dists": + perceptual_loss_fn = DISTS_PIQ() + + inputs, mask = self.get_target(target_file, + self.x_cfg.image_size, + self.result_path, + self.x_cfg.u2net_path, + self.x_cfg.mask_object, + self.x_cfg.fix_scale, + self.device) + inputs = inputs.detach() # inputs as GT + self.print("inputs shape: ", inputs.shape) + + # load renderer + renderer = self.load_render(inputs, attention_map, mask=mask) + # init img + img = renderer.init_image(stage=0) + self.print("init_image shape: ", img.shape) + plot_img(img, self.result_path, fname="init_sketch") + # load optimizer + optimizer = SketchPainterOptimizer(renderer, + self.x_cfg.lr, + self.x_cfg.optim_opacity, + self.x_cfg.optim_rgba, + self.x_cfg.color_lr, + self.x_cfg.optim_width, + self.x_cfg.width_lr) + optimizer.init_optimizers() + + # log params + self.print(f"-> Painter point Params: {len(renderer.get_points_params())}") + self.print(f"-> Painter width Params: {len(renderer.get_width_parameters())}") + self.print(f"-> Painter opacity Params: {len(renderer.get_color_parameters())}") + + total_iter = self.x_cfg.num_iter + best_visual_loss, best_semantic_loss = 100, 100 + min_delta = 1e-6 + + self.print(f"\ntotal optimization steps: {total_iter}") + 
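+        # Summary of the optimization loop below (loss weights come from the config):
+        #   loss = sds_loss                                        # ASDS guidance, active after sds.warmup
+        #        + clip.vis_loss * (sum(l_clip_conv) + l_clip_fc)  # CLIP conv-feature + fc distances
+        #        + text_visual_coeff * l_tvd                       # CLIP text-image distance
+        #        + perceptual.coeff * l_percep                     # LPIPS/DISTS vs. the diffusion sample
+        # Gradients reach the stroke parameters through the differentiable
+        # rasterization inside renderer.get_image().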
with tqdm(initial=self.step, total=total_iter, disable=not self.accelerator.is_main_process) as pbar: + while self.step < total_iter: + raster_sketch = renderer.get_image().to(self.device) + + if self.make_video and (self.step % self.args.framefreq == 0 or self.step == total_iter - 1): + plot_img(raster_sketch, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + # ASDS loss + sds_loss, grad = torch.tensor(0), torch.tensor(0) + if self.step >= self.x_cfg.sds.warmup: + grad_scale = self.x_cfg.sds.grad_scale if self.step > self.x_cfg.sds.warmup else 0 + sds_loss, grad = self.diffusion.score_distillation_sampling( + raster_sketch, + crop_size=self.x_cfg.sds.crop_size, + augments=self.x_cfg.sds.augmentations, + prompt=[prompt], + negative_prompt=self.args.neg_prompt, + guidance_scale=self.x_cfg.sds.guidance_scale, + grad_scale=grad_scale, + t_range=list(self.x_cfg.sds.t_range), + ) + + # CLIP data augmentation + raster_sketch_aug, inputs_aug = self.clip_pair_augment( + raster_sketch, inputs, + im_res=224, + augments=self.cargs.augmentations, + num_aug=self.cargs.num_aug + ) + + # clip visual loss + total_visual_loss = torch.tensor(0) + l_clip_fc, l_clip_conv, clip_conv_loss_sum = torch.tensor(0), [], torch.tensor(0) + if self.x_cfg.clip.vis_loss > 0: + l_clip_fc, l_clip_conv = self.clip_score_fn.compute_visual_distance( + raster_sketch_aug, inputs_aug, clip_norm=False + ) + clip_conv_loss_sum = sum(l_clip_conv) + total_visual_loss = self.x_cfg.clip.vis_loss * (clip_conv_loss_sum + l_clip_fc) + + # text-visual loss + l_tvd = torch.tensor(0.) + if self.cargs.text_visual_coeff > 0: + l_tvd = self.clip_score_fn.compute_text_visual_distance( + raster_sketch_aug, prompt + ) * self.cargs.text_visual_coeff + + # perceptual loss + l_percep = torch.tensor(0.) 
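+                # Note: the perceptual distance is measured against `inputs`, i.e. the image
+                # sampled from the diffusion model above, not a user-supplied photo.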
+ if perceptual_loss_fn is not None: + l_perceptual = perceptual_loss_fn(raster_sketch, inputs).mean() + l_percep = l_perceptual * self.x_cfg.perceptual.coeff + + # total loss + loss = sds_loss + total_visual_loss + l_tvd + l_percep + + # optimization + optimizer.zero_grad_() + loss.backward() + optimizer.step_() + + # update lr + if self.x_cfg.lr_schedule: + optimizer.update_lr(self.step, self.x_cfg.decay_steps) + + # records + pbar.set_description( + f"lr: {optimizer.get_lr():.2f}, " + f"l_total: {loss.item():.4f}, " + f"l_clip_fc: {l_clip_fc.item():.4f}, " + f"l_clip_conv({len(l_clip_conv)}): {clip_conv_loss_sum.item():.4f}, " + f"l_tvd: {l_tvd.item():.4f}, " + f"l_percep: {l_percep.item():.4f}, " + f"sds: {grad.item():.4e}" + ) + + # log raster and svg + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + # log png + plot_couple(inputs, + raster_sketch, + self.step, + prompt=prompt, + output_dir=self.png_logs_dir.as_posix(), + fname=f"iter{self.step}") + # log svg + renderer.save_svg(self.svg_logs_dir.as_posix(), f"svg_iter{self.step}") + # log cross attn + if self.x_cfg.log_cross_attn: + controller = AttentionStore() + _, _ = self.diffusion.get_cross_attention([prompt], + controller, + res=self.x_cfg.cross_attn_res, + from_where=("up", "down"), + save_path=self.attn_logs_dir / f"iter{self.step}.png") + + # logging the best raster images and SVG + if self.step % self.args.eval_step == 0 and self.accelerator.is_main_process: + with torch.no_grad(): + # visual metric + l_clip_fc, l_clip_conv = self.clip_score_fn.compute_visual_distance( + raster_sketch_aug, inputs_aug, clip_norm=False + ) + loss_eval = sum(l_clip_conv) + l_clip_fc + + cur_delta = loss_eval.item() - best_visual_loss + if abs(cur_delta) > min_delta and cur_delta < 0: + best_visual_loss = loss_eval.item() + best_iter_v = self.step + plot_couple(inputs, + raster_sketch, + best_iter_v, + prompt=prompt, + output_dir=self.result_path.as_posix(), + fname="visual_best") + renderer.save_svg(self.result_path.as_posix(), "visual_best") + + # semantic metric + loss_eval = self.clip_score_fn.compute_text_visual_distance( + raster_sketch_aug, prompt + ) + cur_delta = loss_eval.item() - best_semantic_loss + if abs(cur_delta) > min_delta and cur_delta < 0: + best_semantic_loss = loss_eval.item() + best_iter_s = self.step + plot_couple(inputs, + raster_sketch, + best_iter_s, + prompt=prompt, + output_dir=self.result_path.as_posix(), + fname="semantic_best") + renderer.save_svg(self.result_path.as_posix(), "semantic_best") + + # log attention, just once + if self.step == 0 and self.x_cfg.attention_init and self.accelerator.is_main_process: + plt_attn(renderer.get_attn(), + renderer.get_thresh(), + inputs, + renderer.get_inds(), + (self.result_path / "attention_map.png").as_posix()) + + self.step += 1 + pbar.update(1) + + # saving final result + renderer.save_svg(self.svg_logs_dir.as_posix(), "final_svg_tmp") + # stroke pruning + if self.args.opacity_delta != 0: + paths_pruning(self.svg_logs_dir / "final_svg_tmp.svg", self.result_path / "final_result.svg", + self.x_cfg.opacity_delta) + + final_raster_sketch = renderer.get_image().to(self.device) + plot_img(final_raster_sketch, + output_dir=self.result_path, + fname='final_render') + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "diffsketcher_rendering.mp4").as_posix() + ]) + + self.close(msg="painterly 
rendering complete.") + + def get_target(self, + target_file, + image_size, + output_dir, + u2net_path, + mask_object, + fix_scale, + device): + if not is_image_file(target_file): + raise TypeError(f"{target_file} is not image file.") + + target = Image.open(target_file) + + if target.mode == "RGBA": + # Create a white rgba background + new_image = Image.new("RGBA", target.size, "WHITE") + # Paste the image on the background. + new_image.paste(target, (0, 0), target) + target = new_image + target = target.convert("RGB") + + # U2Net mask + mask = target + if mask_object: + if pathlib.Path(u2net_path).exists(): + masked_im, mask = get_mask_u2net(target, output_dir, u2net_path, device) + target = masked_im + else: + self.print(f"'{u2net_path}' is not exist, disable mask target") + + if fix_scale: + target = fix_image_scale(target) + + # define image transforms + transforms_ = [] + if target.size[0] != target.size[1]: + transforms_.append(transforms.Resize((image_size, image_size))) + else: + transforms_.append(transforms.Resize(image_size)) + transforms_.append(transforms.CenterCrop(image_size)) + transforms_.append(transforms.ToTensor()) + + # preprocess + data_transforms = transforms.Compose(transforms_) + target_ = data_transforms(target).unsqueeze(0).to(self.device) + + return target_, mask diff --git a/pytorch_svgrender/pipelines/DiffSketcher_stylized_pipeline.py b/pytorch_svgrender/pipelines/DiffSketcher_stylized_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..bb6cd17ffd98fa2cd0b51a52694cc1b1c1c83d1d --- /dev/null +++ b/pytorch_svgrender/pipelines/DiffSketcher_stylized_pipeline.py @@ -0,0 +1,557 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: +import shutil +import pathlib +from PIL import Image +from functools import partial +from pathlib import Path + +import torch +import torch.nn.functional as F +from torchvision import transforms +from torchvision.datasets.folder import is_image_file +from tqdm.auto import tqdm +import numpy as np +from skimage.color import rgb2gray +import diffusers + +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.libs.metric.lpips_origin import LPIPS +from pytorch_svgrender.libs.metric.piq.perceptual import DISTS as DISTS_PIQ +from pytorch_svgrender.libs.metric.clip_score import CLIPScoreWrapper +from pytorch_svgrender.painter.diffsketcher import ( + Painter, SketchPainterOptimizer, Token2AttnMixinASDSPipeline, Token2AttnMixinASDSSDXLPipeline) +from pytorch_svgrender.painter.diffsketcher.sketch_utils import plt_triplet +from pytorch_svgrender.plt import plot_img +from pytorch_svgrender.painter.diffsketcher.sketch_utils import plt_attn +from pytorch_svgrender.painter.clipasso.sketch_utils import get_mask_u2net, fix_image_scale +from pytorch_svgrender.token2attn.attn_control import AttentionStore, EmptyControl +from pytorch_svgrender.token2attn.ptp_utils import view_images +from pytorch_svgrender.painter.style_clipdraw import sample_indices, StyleLoss, VGG16Extractor +from pytorch_svgrender.diffusers_warp import init_StableDiffusion_pipeline, model2res + + +class StylizedDiffSketcherPipeline(ModelState): + + def __init__(self, args): + attn_log_ = "" + if args.x.attention_init: + attn_log_ = f"-tk{args.x.token_ind}" \ + f"{'-XDoG' if args.x.xdog_intersec else ''}" \ + f"-atc{args.x.attn_coeff}-tau{args.x.softmax_temp}" + logdir_ = f"sd{args.seed}-im{args.x.image_size}" \ + f"-ST{args.x.style_strength}" \ + 
f"-P{args.x.num_paths}W{args.x.width}{'OP' if args.x.optim_opacity else 'BL'}" \ + f"{attn_log_}" + super().__init__(args, log_path_suffix=logdir_) + + # create log dir + self.png_logs_dir = self.result_path / "png_logs" + self.svg_logs_dir = self.result_path / "svg_logs" + self.attn_logs_dir = self.result_path / "attn_logs" + if self.accelerator.is_main_process: + self.png_logs_dir.mkdir(parents=True, exist_ok=True) + self.svg_logs_dir.mkdir(parents=True, exist_ok=True) + self.attn_logs_dir.mkdir(parents=True, exist_ok=True) + + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = self.result_path / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + + if self.x_cfg.model_id == "sdxl": + # default LSDSSDXLPipeline scheduler is EulerDiscreteScheduler + # when LSDSSDXLPipeline calls, scheduler.timesteps will change in step 4 + # which causes problem in sds add_noise() function + # because the random t may not in scheduler.timesteps + custom_pipeline = Token2AttnMixinASDSSDXLPipeline + custom_scheduler = diffusers.DPMSolverMultistepScheduler + self.x_cfg.cross_attn_res = self.x_cfg.cross_attn_res * 2 + elif self.x_cfg.model_id == 'sd21': + custom_pipeline = Token2AttnMixinASDSPipeline + custom_scheduler = diffusers.DDIMScheduler + else: # sd14, sd15 + custom_pipeline = Token2AttnMixinASDSPipeline + custom_scheduler = diffusers.DDIMScheduler + + self.diffusion = init_StableDiffusion_pipeline( + self.x_cfg.model_id, + custom_pipeline=custom_pipeline, + custom_scheduler=custom_scheduler, + device=self.device, + local_files_only=not args.diffuser.download, + force_download=args.diffuser.force_download, + resume_download=args.diffuser.resume_download, + ldm_speed_up=self.x_cfg.ldm_speed_up, + enable_xformers=self.x_cfg.enable_xformers, + gradient_checkpoint=self.x_cfg.gradient_checkpoint, + ) + + self.g_device = torch.Generator(device=self.device).manual_seed(args.seed) + + # init clip model and clip score wrapper + self.cargs = self.x_cfg.clip + self.clip_score_fn = CLIPScoreWrapper(self.cargs.model_name, + device=self.device, + visual_score=True, + feats_loss_type=self.cargs.feats_loss_type, + feats_loss_weights=self.cargs.feats_loss_weights, + fc_loss_weight=self.cargs.fc_loss_weight) + + # load STROTSS + self.style_extractor = VGG16Extractor(space="normal").to(self.device) + self.style_loss = StyleLoss() + + def extract_ldm_attn(self, prompt): + # log prompts + self.print(f"prompt: {prompt}") + self.print(f"negative_prompt: {self.args.neg_prompt}\n") + + # init controller + controller = AttentionStore() if self.x_cfg.attention_init else EmptyControl() + + height = width = model2res(self.x_cfg.model_id) + outputs = self.diffusion(prompt=[prompt], + negative_prompt=self.args.neg_prompt, + height=height, + width=width, + controller=controller, + num_inference_steps=self.x_cfg.num_inference_steps, + guidance_scale=self.x_cfg.guidance_scale, + generator=self.g_device) + + target_file = self.result_path / "ldm_generated_image.png" + view_images([np.array(img) for img in outputs.images], save_image=True, fp=target_file) + + if self.x_cfg.attention_init: + """ldm cross-attention map""" + cross_attention_maps, tokens = \ + self.diffusion.get_cross_attention([prompt], + controller, + res=self.x_cfg.cross_attn_res, + from_where=("up", "down"), + save_path=self.result_path / "cross_attn.png") + + self.print(f"the length of tokens is {len(tokens)}, select {self.x_cfg.token_ind}-th token") + # [res, res, seq_len] + 
self.print(f"origin cross_attn_map shape: {cross_attention_maps.shape}") + # [res, res] + cross_attn_map = cross_attention_maps[:, :, self.x_cfg.token_ind] + self.print(f"select cross_attn_map shape: {cross_attn_map.shape}\n") + cross_attn_map = 255 * cross_attn_map / cross_attn_map.max() + # [res, res, 3] + cross_attn_map = cross_attn_map.unsqueeze(-1).expand(*cross_attn_map.shape, 3) + # [3, res, res] + cross_attn_map = cross_attn_map.permute(2, 0, 1).unsqueeze(0) + # [3, clip_size, clip_size] + cross_attn_map = F.interpolate(cross_attn_map, size=self.x_cfg.image_size, mode='bicubic') + cross_attn_map = torch.clamp(cross_attn_map, min=0, max=255) + # rgb to gray + cross_attn_map = rgb2gray(cross_attn_map.squeeze(0).permute(1, 2, 0)).astype(np.float32) + # torch to numpy + if cross_attn_map.shape[-1] != self.x_cfg.image_size and cross_attn_map.shape[-2] != self.x_cfg.image_size: + cross_attn_map = cross_attn_map.reshape(self.x_cfg.image_size, self.x_cfg.image_size) + # to [0, 1] + cross_attn_map = (cross_attn_map - cross_attn_map.min()) / (cross_attn_map.max() - cross_attn_map.min()) + + """ldm self-attention map""" + self_attention_maps, svd, vh_ = \ + self.diffusion.get_self_attention_comp([prompt], + controller, + res=self.x_cfg.self_attn_res, + from_where=("up", "down"), + img_size=self.x_cfg.image_size, + max_com=self.x_cfg.max_com, + save_path=self.result_path) + + # comp self-attention map + if self.x_cfg.mean_comp: + self_attn = np.mean(vh_, axis=0) + self.print(f"use the mean of {self.x_cfg.max_com} comps.") + else: + self_attn = vh_[self.x_cfg.comp_idx] + self.print(f"select {self.x_cfg.comp_idx}-th comp.") + # to [0, 1] + self_attn = (self_attn - self_attn.min()) / (self_attn.max() - self_attn.min()) + # visual final self-attention + self_attn_vis = np.copy(self_attn) + self_attn_vis = self_attn_vis * 255 + self_attn_vis = np.repeat(np.expand_dims(self_attn_vis, axis=2), 3, axis=2).astype(np.uint8) + self_attn_vis = Image.fromarray(self_attn_vis) + self_attn_vis = np.array(self_attn_vis) + view_images(self_attn_vis, save_image=True, fp=self.result_path / "self-attn-final.png") + + """attention map fusion""" + attn_map = self.x_cfg.attn_coeff * cross_attn_map + (1 - self.x_cfg.attn_coeff) * self_attn + # to [0, 1] + attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min()) + + self.print(f"-> fusion attn_map: {attn_map.shape}") + else: + attn_map = None + + return target_file.as_posix(), attn_map + + def load_render(self, target_img, attention_map, mask=None): + renderer = Painter(self.x_cfg, + self.args.diffvg, + num_strokes=self.x_cfg.num_paths, + num_segments=self.x_cfg.num_segments, + canvas_size=self.x_cfg.image_size, + device=self.device, + target_im=target_img, + attention_map=attention_map, + mask=mask) + return renderer + + @property + def clip_norm_(self): + return transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) + + def clip_pair_augment(self, + x: torch.Tensor, + y: torch.Tensor, + im_res: int, + augments: str = "affine_norm", + num_aug: int = 4): + # init augmentations + augment_list = [] + if "affine" in augments: + augment_list.append( + transforms.RandomPerspective(fill=0, p=1.0, distortion_scale=0.5) + ) + augment_list.append( + transforms.RandomResizedCrop(im_res, scale=(0.8, 0.8), ratio=(1.0, 1.0)) + ) + augment_list.append(self.clip_norm_) # CLIP Normalize + + # compose augmentations + augment_compose = transforms.Compose(augment_list) + # make augmentation pairs + x_augs, y_augs = 
[self.clip_score_fn.normalize(x)], [self.clip_score_fn.normalize(y)] + # repeat N times + for n in range(num_aug): + augmented_pair = augment_compose(torch.cat([x, y])) + x_augs.append(augmented_pair[0].unsqueeze(0)) + y_augs.append(augmented_pair[1].unsqueeze(0)) + xs = torch.cat(x_augs, dim=0) + ys = torch.cat(y_augs, dim=0) + return xs, ys + + def painterly_rendering(self, prompt, style_fpath): + # init attention + target_file, attention_map = self.extract_ldm_attn(prompt) + + timesteps_ = self.diffusion.scheduler.timesteps.cpu().numpy().tolist() + self.print(f"{len(timesteps_)} denoising steps, {timesteps_}") + + perceptual_loss_fn = None + if self.x_cfg.perceptual.coeff > 0: + if self.x_cfg.perceptual.name == "lpips": + lpips_loss_fn = LPIPS(net=self.x_cfg.perceptual.lpips_net).to(self.device) + perceptual_loss_fn = partial(lpips_loss_fn.forward, return_per_layer=False, normalize=False) + elif self.x_cfg.perceptual.name == "dists": + perceptual_loss_fn = DISTS_PIQ() + + style_img, feat_style = self.load_and_process_style_file(style_fpath) + + inputs, mask = self.get_target(target_file, + self.x_cfg.image_size, + self.result_path, + self.x_cfg.u2net_path, + self.x_cfg.mask_object, + self.x_cfg.fix_scale, + self.device) + inputs = inputs.detach() # inputs as GT + self.print("inputs shape: ", inputs.shape) + + # load renderer + renderer = self.load_render(inputs, attention_map, mask=mask) + # init img + img = renderer.init_image(stage=0) + self.print("init_image shape: ", img.shape) + plot_img(img, self.result_path, fname="init_sketch") + # load optimizer + optimizer = SketchPainterOptimizer(renderer, + self.x_cfg.lr, + self.x_cfg.optim_opacity, + self.x_cfg.optim_rgba, + self.x_cfg.color_lr, + self.x_cfg.optim_width, + self.x_cfg.width_lr) + optimizer.init_optimizers() + + # log params + self.print(f"-> Painter point Params: {len(renderer.get_points_params())}") + self.print(f"-> Painter width Params: {len(renderer.get_width_parameters())}") + self.print(f"-> Painter color Params: {len(renderer.get_color_parameters())}") + + total_iter = self.x_cfg.num_iter + best_visual_loss, best_semantic_loss = 100, 100 + min_delta = 1e-6 + + self.print(f"\ntotal optimization steps: {total_iter}") + with tqdm(initial=self.step, total=total_iter, disable=not self.accelerator.is_main_process) as pbar: + while self.step < total_iter: + raster_sketch = renderer.get_image().to(self.device) + + if self.make_video and (self.step % self.args.framefreq == 0 or self.step == total_iter - 1): + plot_img(raster_sketch, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + # ASDS loss + sds_loss, grad = torch.tensor(0), torch.tensor(0) + if self.step >= self.x_cfg.sds.warmup: + grad_scale = self.x_cfg.sds.grad_scale if self.step > self.x_cfg.sds.warmup else 0 + sds_loss, grad = self.diffusion.score_distillation_sampling( + raster_sketch, + crop_size=self.x_cfg.sds.crop_size, + augments=self.x_cfg.sds.augmentations, + prompt=[prompt], + negative_prompt=self.args.neg_prompt, + guidance_scale=self.x_cfg.sds.guidance_scale, + grad_scale=grad_scale, + t_range=list(self.x_cfg.sds.t_range), + ) + + # CLIP data augmentation + raster_sketch_aug, inputs_aug = self.clip_pair_augment( + raster_sketch, inputs, + im_res=224, + augments=self.cargs.augmentations, + num_aug=self.cargs.num_aug + ) + + # clip visual loss + total_visual_loss = torch.tensor(0) + l_clip_fc, l_clip_conv, clip_conv_loss_sum = torch.tensor(0), [], torch.tensor(0) + if self.x_cfg.clip.vis_loss > 0: + l_clip_fc, l_clip_conv = 
self.clip_score_fn.compute_visual_distance( + raster_sketch_aug, inputs_aug, clip_norm=False + ) + clip_conv_loss_sum = sum(l_clip_conv) + total_visual_loss = self.x_cfg.clip.vis_loss * (clip_conv_loss_sum + l_clip_fc) + + # text-visual loss + l_tvd = torch.tensor(0.) + if self.cargs.text_visual_coeff > 0: + l_tvd = self.clip_score_fn.compute_text_visual_distance( + raster_sketch_aug, prompt + ) * self.cargs.text_visual_coeff + + # perceptual loss + l_percep = torch.tensor(0.) + if perceptual_loss_fn is not None: + l_perceptual = perceptual_loss_fn(raster_sketch, inputs).mean() + l_percep = l_perceptual * self.x_cfg.perceptual.coeff + + # style loss + feat_content = self.style_extractor(raster_sketch) + xx, xy = sample_indices(feat_content[0], feat_style) + np.random.shuffle(xx) + np.random.shuffle(xy) + l_style = self.x_cfg.style_strength * self.style_loss.forward( + feat_content, feat_content, feat_style, [xx, xy], 0 + ) + + # total loss + loss = sds_loss + total_visual_loss + l_tvd + l_percep + l_style + + # optimization + optimizer.zero_grad_() + loss.backward() + optimizer.step_() + + # update lr + if self.x_cfg.lr_schedule: + optimizer.update_lr(self.step, self.x_cfg.decay_steps) + + # records + pbar.set_description( + f"lr: {optimizer.get_lr():.2f}, " + f"l_total: {loss.item():.4f}, " + f"l_clip_fc: {l_clip_fc.item():.4f}, " + f"l_clip_conv({len(l_clip_conv)}): {clip_conv_loss_sum.item():.4f}, " + f"l_tvd: {l_tvd.item():.4f}, " + f"l_percep: {l_percep.item():.4f}, " + f"l_style: {l_style.item():.4f}, " + f"sds: {grad.item():.4e}" + ) + + # log raster and svg + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + # log png + plt_triplet(inputs, + raster_sketch, + style_img, + self.step, + prompt, + output_dir=self.png_logs_dir.as_posix(), + fname=f"iter{self.step}") + # log svg + renderer.save_svg(self.svg_logs_dir.as_posix(), f"svg_iter{self.step}") + # log cross attn + if self.x_cfg.log_cross_attn: + controller = AttentionStore() + _, _ = self.diffusion.get_cross_attention([prompt], + controller, + res=self.x_cfg.cross_attn_res, + from_where=("up", "down"), + save_path=self.attn_logs_dir / f"iter{self.step}.png") + + # logging the best raster images and SVG + if self.step % self.args.eval_step == 0 and self.accelerator.is_main_process: + with torch.no_grad(): + # visual metric + l_clip_fc, l_clip_conv = self.clip_score_fn.compute_visual_distance( + raster_sketch_aug, inputs_aug, clip_norm=False + ) + loss_eval = sum(l_clip_conv) + l_clip_fc + + cur_delta = loss_eval.item() - best_visual_loss + if abs(cur_delta) > min_delta and cur_delta < 0: + best_visual_loss = loss_eval.item() + best_iter_v = self.step + plt_triplet(inputs, + raster_sketch, + style_img, + best_iter_v, + prompt, + output_dir=self.result_path.as_posix(), + fname="visual_best") + renderer.save_svg(self.result_path.as_posix(), "visual_best") + + # semantic metric + loss_eval = self.clip_score_fn.compute_text_visual_distance( + raster_sketch_aug, prompt + ) + cur_delta = loss_eval.item() - best_semantic_loss + if abs(cur_delta) > min_delta and cur_delta < 0: + best_semantic_loss = loss_eval.item() + best_iter_s = self.step + plt_triplet(inputs, + raster_sketch, + style_img, + best_iter_s, + prompt, + output_dir=self.result_path.as_posix(), + fname="semantic_best") + renderer.save_svg(self.result_path.as_posix(), "semantic_best") + + # log attention, for once + if self.step == 0 and self.x_cfg.attention_init and self.accelerator.is_main_process: + plt_attn(renderer.get_attn(), + 
renderer.get_thresh(), + inputs, + renderer.get_inds(), + (self.result_path / "attention_map.png").as_posix()) + + self.step += 1 + pbar.update(1) + + # saving final result + renderer.save_svg(self.result_path.as_posix(), f"final_best_step") + + final_raster_sketch = renderer.get_image().to(self.device) + plot_img(final_raster_sketch, + output_dir=self.result_path, + fname='final_best_step') + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "stylediffsketcher_rendering.mp4").as_posix() + ]) + + self.close(msg="painterly rendering complete.") + + def load_and_process_style_file(self, style_fpath): + # load style file + style_path = Path(style_fpath) + assert style_path.exists(), f"{style_fpath} is not exist!" + style_img = self.style_file_preprocess(style_path.as_posix()) + self.print(f"load style file from: {style_path.as_posix()}") + shutil.copy(style_fpath, self.result_path) # copy style file + + # extract style features from style image + feat_style = None + for i in range(5): + with torch.no_grad(): + # r is region of interest (mask) + feat_e = self.style_extractor.forward_samples_hypercolumn(style_img, samps=1000) + feat_style = feat_e if feat_style is None else torch.cat((feat_style, feat_e), dim=2) + + return style_img, feat_style + + def style_file_preprocess(self, style_path): + process_comp = transforms.Compose([ + transforms.Resize(size=(224, 224)), + transforms.ToTensor(), + # transforms.Lambda(lambda t: t - 0.5), + transforms.Lambda(lambda t: t.unsqueeze(0)), + # transforms.Lambda(lambda t: (t + 1) / 2), + ]) + + style_pil = Image.open(style_path).convert("RGB") # open file + style_file = process_comp(style_pil) # preprocess + style_file = style_file.to(self.device) + return style_file + + def get_target(self, + target_file, + image_size, + output_dir, + u2net_path, + mask_object, + fix_scale, + device): + if not is_image_file(target_file): + raise TypeError(f"{target_file} is not image file.") + + target = Image.open(target_file) + + if target.mode == "RGBA": + # Create a white rgba background + new_image = Image.new("RGBA", target.size, "WHITE") + # Paste the image on the background. 
+            new_image.paste(target, (0, 0), target)
+            target = new_image
+        target = target.convert("RGB")
+
+        # U2Net mask
+        mask = target
+        if mask_object:
+            if pathlib.Path(u2net_path).exists():
+                masked_im, mask = get_mask_u2net(target, output_dir, u2net_path, device)
+                target = masked_im
+            else:
+                self.print(f"'{u2net_path}' does not exist, mask target disabled")
+
+        if fix_scale:
+            target = fix_image_scale(target)
+
+        # define image transforms
+        transforms_ = []
+        if target.size[0] != target.size[1]:
+            transforms_.append(transforms.Resize((image_size, image_size)))
+        else:
+            transforms_.append(transforms.Resize(image_size))
+            transforms_.append(transforms.CenterCrop(image_size))
+        transforms_.append(transforms.ToTensor())
+
+        # preprocess
+        data_transforms = transforms.Compose(transforms_)
+        target_ = data_transforms(target).unsqueeze(0).to(self.device)
+
+        return target_, mask
diff --git a/pytorch_svgrender/pipelines/DiffVG_pipeline.py b/pytorch_svgrender/pipelines/DiffVG_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5260c3b63d45d9b825e4d3baf97da56f2cd0997
--- /dev/null
+++ b/pytorch_svgrender/pipelines/DiffVG_pipeline.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+# Author: ximing
+# Description: DiffVG pipeline
+# Copyright (c) 2023, XiMing Xing.
+# License: MIT License
+
+import shutil
+from pathlib import Path
+from functools import partial
+from typing import AnyStr
+from PIL import Image
+
+from tqdm.auto import tqdm
+import torch
+from torchvision import transforms
+
+from pytorch_svgrender.libs.engine import ModelState
+from pytorch_svgrender.painter.diffvg import Painter, PainterOptimizer
+from pytorch_svgrender.plt import plot_img, plot_couple
+from pytorch_svgrender.libs.metric.lpips_origin import LPIPS
+
+
+class DiffVGPipeline(ModelState):
+
+    def __init__(self, args):
+        logdir_ = f"sd{args.seed}" \
+                  f"-{args.x.path_type}" \
+                  f"-P{args.x.num_paths}"
+        super().__init__(args, log_path_suffix=logdir_)
+
+        assert self.x_cfg.path_type in ['unclosed', 'closed']
+
+        # create log dir
+        self.png_logs_dir = self.result_path / "png_logs"
+        self.svg_logs_dir = self.result_path / "svg_logs"
+        if self.accelerator.is_main_process:
+            self.png_logs_dir.mkdir(parents=True, exist_ok=True)
+            self.svg_logs_dir.mkdir(parents=True, exist_ok=True)
+
+        # make video log
+        self.make_video = self.args.mv
+        if self.make_video:
+            self.frame_idx = 0
+            self.frame_log_dir = self.result_path / "frame_logs"
+            self.frame_log_dir.mkdir(parents=True, exist_ok=True)
+
+    def target_file_preprocess(self, tar_path):
+        process_comp = transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Lambda(lambda t: t.unsqueeze(0)),
+        ])
+
+        tar_pil = Image.open(tar_path).convert("RGB")  # open file
+        target_img = process_comp(tar_pil)  # preprocess
+        target_img = target_img.to(self.device)
+        return target_img
+
+    def painterly_rendering(self, img_path: AnyStr):
+        # load target file
+        target_file = Path(img_path)
+        assert target_file.exists(), f"{target_file} does not exist!"
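+        # Rough overview: this pipeline vectorizes the target image directly with diffvg,
+        # optimizing `num_paths` open or closed paths against an L1 / L2 / LPIPS
+        # reconstruction loss chosen by `x_cfg.loss_type`; no text prompt or diffusion
+        # guidance is involved here.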
+ shutil.copy(target_file, self.result_path) # copy target file + target_img = self.target_file_preprocess(target_file.as_posix()) + self.print(f"load image from: '{target_file.as_posix()}'") + + # init Painter + renderer = Painter(target_img, + self.args.diffvg, + canvas_size=[target_img.shape[3], target_img.shape[2]], + path_type=self.x_cfg.path_type, + max_width=self.x_cfg.max_width, + device=self.device) + init_img = renderer.init_image(num_paths=self.x_cfg.num_paths) + self.print("init_image shape: ", init_img.shape) + plot_img(init_img, self.result_path, fname="init_img") + + # init Painter Optimizer + num_iter = self.x_cfg.num_iter + optimizer = PainterOptimizer(renderer, + num_iter, + self.x_cfg.lr_base, + trainable_stroke=self.x_cfg.path_type == 'unclosed') + optimizer.init_optimizer() + + # Set Loss + if self.x_cfg.loss_type in ['lpips', 'l2+lpips']: + lpips_loss_fn = LPIPS(net=self.x_cfg.perceptual.lpips_net).to(self.device) + perceptual_loss_fn = partial(lpips_loss_fn.forward, return_per_layer=False, normalize=False) + + with tqdm(initial=self.step, total=num_iter, disable=not self.accelerator.is_main_process) as pbar: + while self.step < num_iter: + raster_img = renderer.get_image(self.step).to(self.device) + + if self.make_video and (self.step % self.args.framefreq == 0 or self.step == num_iter - 1): + plot_img(raster_img, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + # Reconstruction Loss + if self.x_cfg.loss_type == 'l1': + loss_recon = torch.nn.functional.l1_loss(raster_img, target_img) + elif self.x_cfg.loss_type == 'lpips': + loss_recon = perceptual_loss_fn(raster_img, target_img).mean() + elif self.x_cfg.loss_type == 'l2': # default: MSE loss + loss_recon = torch.nn.functional.mse_loss(raster_img, target_img) + elif self.x_cfg.loss_type == 'l2+lpips': # default: MSE loss + lpips = perceptual_loss_fn(raster_img, target_img).mean() + loss_mse = torch.nn.functional.mse_loss(raster_img, target_img) + loss_recon = loss_mse + lpips + + # total loss + loss = loss_recon + + pbar.set_description( + f"lr: {optimizer.get_lr():.4f}, " + f"L_recon: {loss_recon.item():.4f}" + ) + + # optimization + optimizer.zero_grad_() + loss.backward() + optimizer.step_() + + renderer.clip_curve_shape() + + if self.x_cfg.lr_schedule: + optimizer.update_lr() + + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + plot_couple(target_img, + raster_img, + self.step, + output_dir=self.png_logs_dir.as_posix(), + fname=f"iter{self.step}") + renderer.save_svg(self.svg_logs_dir / f"svg_iter{self.step}.svg") + + self.step += 1 + pbar.update(1) + + # end rendering + renderer.save_svg(self.result_path / "final_svg.svg") + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "live_rendering.mp4").as_posix() + ]) + + self.close(msg="painterly rendering complete.") diff --git a/pytorch_svgrender/pipelines/LIVE_pipeline.py b/pytorch_svgrender/pipelines/LIVE_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..88907c04786f656d25e05714fde98ef947220a45 --- /dev/null +++ b/pytorch_svgrender/pipelines/LIVE_pipeline.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: LIVE pipeline +# Copyright (c) 2023, XiMing Xing. 
+# License: MIT License + +import shutil +from pathlib import Path +from typing import AnyStr +from PIL import Image + +from tqdm.auto import tqdm +import torch +from torchvision import transforms + +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.painter.live import Painter, PainterOptimizer, xing_loss_fn +from pytorch_svgrender.plt import plot_img, plot_couple + + +class LIVEPipeline(ModelState): + + def __init__(self, args): + logdir_ = f"sd{args.seed}" \ + f"-im{args.x.image_size}" \ + f"-P{args.x.num_paths}" + super().__init__(args, log_path_suffix=logdir_) + + # create log dir + self.png_logs_dir = self.result_path / "png_logs" + self.svg_logs_dir = self.result_path / "svg_logs" + if self.accelerator.is_main_process: + self.png_logs_dir.mkdir(parents=True, exist_ok=True) + self.svg_logs_dir.mkdir(parents=True, exist_ok=True) + + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = self.result_path / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + + def get_path_schedule(self, schedule_each): + if self.x_cfg.path_schedule == 'repeat': + return int(self.x_cfg.num_paths / schedule_each) * [schedule_each] + elif self.x_cfg.path_schedule == 'list': + assert isinstance(self.x_cfg.schedule_each, list) + return schedule_each + else: + raise NotImplementedError + + def target_file_preprocess(self, tar_path): + process_comp = transforms.Compose([ + transforms.Resize(size=(self.x_cfg.image_size, self.x_cfg.image_size)), + transforms.ToTensor(), + transforms.Lambda(lambda t: t.unsqueeze(0)), + ]) + + tar_pil = Image.open(tar_path).convert("RGB") # open file + target_img = process_comp(tar_pil) # preprocess + target_img = target_img.to(self.device) + return target_img + + def painterly_rendering(self, img_path: AnyStr): + # load target file + target_file = Path(img_path) + assert target_file.exists(), f"{target_file} is not exist!" 
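+        # Rough overview of the loop below: paths are added group by group according to
+        # `path_schedule`; each group is fitted with a reconstruction loss (optionally
+        # distance-weighted when `use_distance_weighted_loss` is set) plus a Xing loss
+        # that discourages self-intersecting curves, and the next group is re-initialized
+        # from the current rendering via `component_wise_path_init`.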
+ shutil.copy(target_file, self.result_path) # copy target file + target_img = self.target_file_preprocess(target_file.as_posix()) + self.print(f"load image file from: '{target_file.as_posix()}'") + + # log path_schedule + path_schedule = self.get_path_schedule(self.x_cfg.schedule_each) + self.print(f"path_schedule: {path_schedule}") + + renderer = Painter(target_img, + self.args.diffvg, + self.x_cfg.num_segments, + self.x_cfg.segment_init, + self.x_cfg.radius, + canvas_size=self.x_cfg.image_size, + trainable_bg=self.x_cfg.trainable_bg, + stroke=self.x_cfg.train_stroke, + stroke_width=self.x_cfg.width, + device=self.device) + # first init center + renderer.component_wise_path_init(pred=None, init_type=self.x_cfg.coord_init) + + num_iter = self.x_cfg.num_iter + + optimizer_list = [ + PainterOptimizer(renderer, num_iter, self.x_cfg.lr_base, + self.x_cfg.train_stroke, self.x_cfg.trainable_bg) + for _ in range(len(path_schedule)) + ] + + pathn_record = [] + loss_weight_keep = 0 + loss_weight = 1 + + total_step = len(path_schedule) * num_iter + with tqdm(initial=self.step, total=total_step, disable=not self.accelerator.is_main_process) as pbar: + + for path_idx, pathn in enumerate(path_schedule): + # record path + pathn_record.append(pathn) + # init graphic + img = renderer.init_image(num_paths=pathn) + plot_img(img, self.result_path, fname=f"init_img_{path_idx}") + # rebuild optimizer + optimizer_list[path_idx].init_optimizers() + + pbar.write(f"=> adding {pathn} paths, n_path: {sum(pathn_record)}, " + f"path_schedule: {self.x_cfg.path_schedule}") + + for t in range(num_iter): + raster_img = renderer.get_image(step=t).to(self.device) + + if self.make_video and (t % self.args.framefreq == 0 or t == num_iter - 1): + plot_img(raster_img, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + if self.x_cfg.use_distance_weighted_loss: + loss_weight = renderer.calc_distance_weight(loss_weight_keep) + + # UDF Loss for Reconstruction + if self.x_cfg.use_l1_loss: + loss_recon = torch.nn.functional.l1_loss(raster_img, target_img) + else: # default: MSE loss + loss_mse = ((raster_img - target_img) ** 2) + loss_recon = (loss_mse.sum(1) * loss_weight).mean() + + # Xing Loss for Self-Interaction Problem + loss_xing = xing_loss_fn(renderer.get_point_parameters()) * self.x_cfg.xing_loss_weight + # total loss + loss = loss_recon + loss_xing + + pbar.set_description( + f"lr: {optimizer_list[path_idx].get_lr():.4f}, " + f"L_total: {loss.item():.4f}, " + f"L_recon: {loss_recon.item():.4f}, " + f"L_xing: {loss_xing.item()}" + ) + + # optimization + for i in range(path_idx + 1): + optimizer_list[i].zero_grad_() + + loss.backward() + + for i in range(path_idx + 1): + optimizer_list[i].step_() + + renderer.clip_curve_shape() + + if self.x_cfg.lr_schedule: + for i in range(path_idx + 1): + optimizer_list[i].update_lr() + + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + plot_couple(target_img, + raster_img, + self.step, + output_dir=self.png_logs_dir.as_posix(), + fname=f"iter{self.step}") + renderer.save_svg(self.svg_logs_dir / f"svg_iter{self.step}.svg") + + self.step += 1 + pbar.update(1) + + # end a set of path optimization + if self.x_cfg.use_distance_weighted_loss: + loss_weight_keep = loss_weight.detach().cpu().numpy() * 1 + # recalculate the coordinates for the new join path + renderer.component_wise_path_init(pred=raster_img, init_type=self.x_cfg.coord_init) + + renderer.save_svg(self.result_path / "final_svg.svg") + + if self.make_video: + from subprocess 
import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "live_rendering.mp4").as_posix() + ]) + + self.close(msg="painterly rendering complete.") diff --git a/pytorch_svgrender/pipelines/SVGDreamer_pipeline.py b/pytorch_svgrender/pipelines/SVGDreamer_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..5a52dcb44701538e979c75c5fdc06076a784cedb --- /dev/null +++ b/pytorch_svgrender/pipelines/SVGDreamer_pipeline.py @@ -0,0 +1,366 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: +import pathlib +from PIL import Image +from typing import AnyStr + +import numpy as np +from tqdm.auto import tqdm +import torch +from torch.optim.lr_scheduler import LambdaLR +import torchvision +from torchvision import transforms + +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.libs.solver.optim import get_optimizer +from pytorch_svgrender.painter.svgdreamer import Painter, PainterOptimizer +from pytorch_svgrender.painter.svgdreamer.painter_params import CosineWithWarmupLRLambda +from pytorch_svgrender.painter.live import xing_loss_fn +from pytorch_svgrender.painter.svgdreamer import VectorizedParticleSDSPipeline +from pytorch_svgrender.plt import plot_img +from pytorch_svgrender.utils.color_attrs import init_tensor_with_color +from pytorch_svgrender.token2attn.ptp_utils import view_images +from pytorch_svgrender.diffusers_warp import model2res + +import ImageReward as RM + + +class SVGDreamerPipeline(ModelState): + + def __init__(self, args): + assert args.x.style in ["iconography", "pixelart", "low-poly", "painting", "sketch", "ink"] + assert args.x.guidance.n_particle >= args.x.guidance.vsd_n_particle + assert args.x.guidance.n_particle >= args.x.guidance.phi_n_particle + assert args.x.guidance.n_phi_sample >= 1 + + logdir_ = f"sd{args.seed}" \ + f"-{'vpsd' if args.x.skip_sive else 'sive'}" \ + f"-{args.x.model_id}" \ + f"-{args.x.style}" \ + f"-P{args.x.num_paths}" \ + f"{'-RePath' if args.x.path_reinit.use else ''}" + super().__init__(args, log_path_suffix=logdir_) + + # create log dir + self.png_logs_dir = self.result_path / "png_logs" + self.svg_logs_dir = self.result_path / "svg_logs" + self.ft_png_logs_dir = self.result_path / "ft_png_logs" + self.ft_svg_logs_dir = self.result_path / "ft_svg_logs" + self.sd_sample_dir = self.result_path / 'sd_samples' + self.reinit_dir = self.result_path / "reinit_logs" + self.init_stage_two_dir = self.result_path / "stage_two_init_logs" + self.phi_samples_dir = self.result_path / "phi_sampling_logs" + + if self.accelerator.is_main_process: + self.png_logs_dir.mkdir(parents=True, exist_ok=True) + self.svg_logs_dir.mkdir(parents=True, exist_ok=True) + self.ft_png_logs_dir.mkdir(parents=True, exist_ok=True) + self.ft_svg_logs_dir.mkdir(parents=True, exist_ok=True) + self.sd_sample_dir.mkdir(parents=True, exist_ok=True) + self.reinit_dir.mkdir(parents=True, exist_ok=True) + self.init_stage_two_dir.mkdir(parents=True, exist_ok=True) + self.phi_samples_dir.mkdir(parents=True, exist_ok=True) + + self.select_fpth = self.result_path / 'select_sample.png' + + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = self.result_path / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + + self.g_device = torch.Generator(device=self.device).manual_seed(args.seed) + + 
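+        # Interpretive note: VectorizedParticleSDSPipeline wraps the diffusion model used for
+        # variational score distillation; each of the `n_particle` SVG renderers created below
+        # acts as one particle, and a separate "phi" (LoRA) network is trained on the particles'
+        # latents, optionally steered by ImageReward when `phi_ReFL` is enabled.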
self.pipeline = VectorizedParticleSDSPipeline(args, args.diffuser, self.x_cfg.guidance, self.device) + + # load reward model + self.reward_model = None + if self.x_cfg.guidance.phi_ReFL: + self.reward_model = RM.load("ImageReward-v1.0", device=self.device, download_root=self.x_cfg.reward_path) + + self.style = self.x_cfg.style + if self.style == "pixelart": + self.x_cfg.lr_stage_one.lr_schedule = False + self.x_cfg.lr_stage_two.lr_schedule = False + + def target_file_preprocess(self, tar_path: AnyStr): + process_comp = transforms.Compose([ + transforms.Resize(size=(self.x_cfg.image_size, self.x_cfg.image_size)), + transforms.ToTensor(), + transforms.Lambda(lambda t: t.unsqueeze(0)), + ]) + + tar_pil = Image.open(tar_path).convert("RGB") # open file + target_img = process_comp(tar_pil) # preprocess + target_img = target_img.to(self.device) + return target_img + + def SIVE_stage(self, text_prompt: str): + # TODO: SIVE implementation + pass + + def painterly_rendering(self, text_prompt: str, target_file: AnyStr = None): + # log prompts + self.print(f"prompt: {text_prompt}") + self.print(f"neg_prompt: {self.args.neg_prompt}\n") + + # for convenience + im_size = self.x_cfg.image_size + guidance_cfg = self.x_cfg.guidance + n_particle = self.x_cfg.guidance.n_particle + total_step = self.x_cfg.guidance.num_iter + path_reinit = self.x_cfg.path_reinit + + init_from_target = True if (target_file and pathlib.Path(target_file).exists()) else False + # switch mode + if self.x_cfg.skip_sive and not init_from_target: + # mode 1: optimization with VPSD from scratch + # randomly init + self.print("optimization with VPSD from scratch...") + if self.x_cfg.color_init == 'rand': + target_img = torch.randn(1, 3, im_size, im_size) + self.print("color: randomly init") + else: + target_img = init_tensor_with_color(self.x_cfg.color_init, 1, im_size, im_size) + self.print(f"color: {self.x_cfg.color_init}") + + # log init target_img + plot_img(target_img, self.result_path, fname='init_target_img') + final_svg_path = None + elif init_from_target: + # mode 2: load the SVG file and finetune it + self.print(f"load svg from {target_file} ...") + self.print(f"SVG fine-tuning via VPSD...") + final_svg_path = target_file + if self.x_cfg.color_init == 'target_randn': + # special order: init newly paths color use random color + target_img = torch.randn(1, 3, im_size, im_size) + self.print("color: randomly init") + else: + # load the SVG and init newly paths color use target_img + # note: the target will be converted to png via pydiffvg when load_renderer called + target_img = None + else: + # mode 3: text-to-img-to-svg (two stage) + target_img, final_svg_path = self.SIVE_stage(text_prompt) + self.x_cfg.path_svg = final_svg_path + self.print("\n SVG fine-tuning via VPSD...") + plot_img(target_img, self.result_path, fname='init_target_img') + + # create svg renderer + renderers = [self.load_renderer(final_svg_path) for _ in range(n_particle)] + + # randomly initialize the particles + if self.x_cfg.skip_sive or init_from_target: + if target_img is None: + target_img = self.target_file_preprocess(self.result_path / 'target_img.png') + for render in renderers: + render.component_wise_path_init(gt=target_img, pred=None, init_type='random') + + # log init images + for i, r in enumerate(renderers): + init_imgs = r.init_image(stage=0, num_paths=self.x_cfg.num_paths) + plot_img(init_imgs, self.init_stage_two_dir, fname=f"init_img_stage_two_{i}") + + # init renderer optimizer + optimizers = [] + for renderer in renderers: + optim_ = 
PainterOptimizer(renderer, + self.style, + guidance_cfg.num_iter, + self.x_cfg.lr_stage_two, + self.x_cfg.trainable_bg) + optim_.init_optimizers() + optimizers.append(optim_) + + # init phi_model optimizer + phi_optimizer = get_optimizer('adamW', + self.pipeline.phi_params, + guidance_cfg.phi_lr, + guidance_cfg.phi_optim) + # init phi_model lr scheduler + phi_scheduler = None + schedule_cfg = guidance_cfg.phi_schedule + if schedule_cfg.use: + phi_lr_lambda = CosineWithWarmupLRLambda(num_steps=schedule_cfg.total_step, + warmup_steps=schedule_cfg.warmup_steps, + warmup_start_lr=schedule_cfg.warmup_start_lr, + warmup_end_lr=schedule_cfg.warmup_end_lr, + cosine_end_lr=schedule_cfg.cosine_end_lr) + phi_scheduler = LambdaLR(phi_optimizer, lr_lambda=phi_lr_lambda, last_epoch=-1) + + self.print(f"-> Painter point Params: {len(renderers[0].get_point_parameters())}") + self.print(f"-> Painter color Params: {len(renderers[0].get_color_parameters())}") + self.print(f"-> Painter width Params: {len(renderers[0].get_width_parameters())}") + + L_reward = torch.tensor(0.) + + self.step = 0 # reset global step + self.print(f"\ntotal VPSD optimization steps: {total_step}") + with tqdm(initial=self.step, total=total_step, disable=not self.accelerator.is_main_process) as pbar: + while self.step < total_step: + # set particles + particles = [renderer.get_image() for renderer in renderers] + raster_imgs = torch.cat(particles, dim=0) + + if self.make_video and (self.step % self.args.framefreq == 0 or self.step == total_step - 1): + plot_img(raster_imgs, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + L_guide, grad, latents, t_step = self.pipeline.variational_score_distillation( + raster_imgs, + self.step, + prompt=[text_prompt], + negative_prompt=self.args.neg_prompt, + grad_scale=guidance_cfg.grad_scale, + enhance_particle=guidance_cfg.particle_aug, + im_size=model2res(self.x_cfg.model_id) + ) + + # Xing Loss for Self-Interaction Problem + L_add = torch.tensor(0.) 
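+                # Auxiliary regularizer: Xing loss over the path control points, applied for the
+                # "iconography" style (or whenever xing_loss.use is set) to penalize
+                # self-intersecting closed paths.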
+ if self.style == "iconography" or self.x_cfg.xing_loss.use: + for r in renderers: + L_add += xing_loss_fn(r.get_point_parameters()) * self.x_cfg.xing_loss.weight + + loss = L_guide + L_add + + # optimization + for opt_ in optimizers: + opt_.zero_grad_() + loss.backward() + for opt_ in optimizers: + opt_.step_() + + # phi_model optimization + for _ in range(guidance_cfg.phi_update_step): + L_lora = self.pipeline.train_phi_model(latents, guidance_cfg.phi_t, as_latent=True) + + phi_optimizer.zero_grad() + L_lora.backward() + phi_optimizer.step() + + # reward learning + if guidance_cfg.phi_ReFL and self.step % guidance_cfg.phi_sample_step == 0: + with torch.no_grad(): + phi_outputs = [] + phi_sample_paths = [] + for idx in range(guidance_cfg.n_phi_sample): + phi_output = self.pipeline.sample(text_prompt, + num_inference_steps=guidance_cfg.phi_infer_step, + generator=self.g_device) + sample_path = (self.phi_samples_dir / f'iter{idx}.png').as_posix() + phi_output.images[0].save(sample_path) + phi_sample_paths.append(sample_path) + + phi_output_np = np.array(phi_output.images[0]) + phi_outputs.append(phi_output_np) + # save all samples + view_images(phi_outputs, save_image=True, + num_rows=max(len(phi_outputs) // 6, 1), + fp=self.phi_samples_dir / f'samples_iter{self.step}.png') + + ranking, rewards = self.reward_model.inference_rank(text_prompt, phi_sample_paths) + self.print(f"ranking: {ranking}, reward score: {rewards}") + + for k in range(guidance_cfg.n_phi_sample): + phi = self.target_file_preprocess(phi_sample_paths[ranking[k] - 1]) + L_reward = self.pipeline.train_phi_model_refl(phi, weight=rewards[k]) + + phi_optimizer.zero_grad() + L_reward.backward() + phi_optimizer.step() + + # update the learning rate of the phi_model + if phi_scheduler is not None: + phi_scheduler.step() + + # curve regularization + for r in renderers: + r.clip_curve_shape() + + # re-init paths + if self.step % path_reinit.freq == 0 and self.step < path_reinit.stop_step and self.step != 0: + for i, r in enumerate(renderers): + r.reinitialize_paths(path_reinit.use, # on-off + path_reinit.opacity_threshold, + path_reinit.area_threshold, + fpath=self.reinit_dir / f"reinit-{self.step}_p{i}.svg") + + # update lr + if self.x_cfg.lr_stage_two.lr_schedule: + for opt_ in optimizers: + opt_.update_lr() + + # log pretrained model lr + lr_str = "" + for k, lr in optimizers[0].get_lr().items(): + lr_str += f"{k}_lr: {lr:.4f}, " + # log phi model lr + cur_phi_lr = phi_optimizer.param_groups[0]['lr'] + lr_str += f"phi_lr: {cur_phi_lr:.3e}, " + + pbar.set_description( + lr_str + + f"t: {t_step.item():.2f}, " + f"L_total: {loss.item():.4f}, " + f"L_add: {L_add.item():.4e}, " + f"L_lora: {L_lora.item():.4f}, " + f"L_reward: {L_reward.item():.4f}, " + f"vpsd: {grad.item():.4e}" + ) + + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + # save png + torchvision.utils.save_image(raster_imgs, + fp=self.ft_png_logs_dir / f'iter{self.step}.png') + + # save svg + for i, r in enumerate(renderers): + r.pretty_save_svg(self.ft_svg_logs_dir / f"svg_iter{self.step}_p{i}.svg") + + self.step += 1 + pbar.update(1) + + # save final + for i, r in enumerate(renderers): + final_svg_path = self.result_path / f"finetune_final_p_{i}.svg" + r.pretty_save_svg(final_svg_path) + # save SVGs + torchvision.utils.save_image(raster_imgs, fp=self.result_path / f'all_particles.png') + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / 
"iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "svgdreamer_rendering.mp4").as_posix() + ]) + + self.close(msg="painterly rendering complete.") + + def load_renderer(self, path_svg=None): + renderer = Painter(self.args.diffvg, + self.style, + self.x_cfg.num_segments, + self.x_cfg.segment_init, + self.x_cfg.radius, + self.x_cfg.image_size, + self.x_cfg.grid, + self.x_cfg.trainable_bg, + self.x_cfg.width, + path_svg=path_svg, + device=self.device) + + # if load a svg file, then rasterize it + save_path = self.result_path / 'target_img.png' + if path_svg is not None and (not save_path.exists()): + canvas_width, canvas_height, shapes, shape_groups = renderer.load_svg(path_svg) + render_img = renderer.render_image(canvas_width, canvas_height, shapes, shape_groups) + torchvision.utils.save_image(render_img, fp=save_path) + return renderer diff --git a/pytorch_svgrender/pipelines/StyleCLIPDraw_pipeline.py b/pytorch_svgrender/pipelines/StyleCLIPDraw_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..8559083b5c7df0a2c4a20f92d9ab117ba6c143af --- /dev/null +++ b/pytorch_svgrender/pipelines/StyleCLIPDraw_pipeline.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: +import shutil +from PIL import Image +from pathlib import Path + +import torch +from torchvision import transforms +import clip +from tqdm.auto import tqdm +import numpy as np + +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.painter.style_clipdraw import ( + Painter, PainterOptimizer, VGG16Extractor, StyleLoss, sample_indices +) +from pytorch_svgrender.plt import plot_img, plot_couple + + +class StyleCLIPDrawPipeline(ModelState): + + def __init__(self, args): + logdir_ = f"sd{args.seed}" \ + f"-P{args.x.num_paths}" \ + f"-style{args.x.style_strength}" \ + f"-n{args.x.num_aug}" + super().__init__(args, log_path_suffix=logdir_) + + # create log dir + self.png_logs_dir = self.result_path / "png_logs" + self.svg_logs_dir = self.result_path / "svg_logs" + if self.accelerator.is_main_process: + self.png_logs_dir.mkdir(parents=True, exist_ok=True) + self.svg_logs_dir.mkdir(parents=True, exist_ok=True) + + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = self.result_path / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + + self.clip, self.tokenize_fn = self.init_clip() + + self.style_extractor = VGG16Extractor(space="normal").to(self.device) + self.style_loss = StyleLoss() + + def init_clip(self): + model, _ = clip.load('ViT-B/32', self.device, jit=False) + return model, clip.tokenize + + def drawing_augment(self, image): + augment_trans = transforms.Compose([ + transforms.RandomPerspective(fill=1, p=1, distortion_scale=0.5), + transforms.RandomResizedCrop(224, scale=(0.7, 0.9)), + transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) + ]) + + # image augmentation transformation + img_augs = [] + for n in range(self.x_cfg.num_aug): + img_augs.append(augment_trans(image)) + im_batch = torch.cat(img_augs) + # clip visual encoding + image_features = self.clip.encode_image(im_batch) + + return image_features + + def style_file_preprocess(self, style_file): + process_comp = transforms.Compose([ + transforms.Resize(size=(224, 224)), + transforms.ToTensor(), + transforms.Lambda(lambda t: t.unsqueeze(0)), + transforms.Lambda(lambda t: (t + 1) / 2), + ]) + style_file = 
process_comp(style_file) + style_file = style_file.to(self.device) + return style_file + + def painterly_rendering(self, prompt, style_fpath): + # load style file + style_path = Path(style_fpath) + assert style_path.exists(), f"{style_fpath} is not exist!" + self.print(f"load style file from: {style_path.as_posix()}") + style_pil = Image.open(style_path.as_posix()).convert("RGB") + style_img = self.style_file_preprocess(style_pil) + shutil.copy(style_fpath, self.result_path) # copy style file + + # extract style features from style image + feat_style = None + for i in range(5): + with torch.no_grad(): + # r is region of interest (mask) + feat_e = self.style_extractor.forward_samples_hypercolumn(style_img, samps=1000) + feat_style = feat_e if feat_style is None else torch.cat((feat_style, feat_e), dim=2) + + # text prompt encoding + self.print(f"prompt: {prompt}") + text_tokenize = self.tokenize_fn(prompt).to(self.device) + with torch.no_grad(): + text_features = self.clip.encode_text(text_tokenize) + + renderer = Painter(self.x_cfg, + self.args.diffvg, + num_strokes=self.x_cfg.num_paths, + canvas_size=self.x_cfg.image_size, + device=self.device) + img = renderer.init_image(stage=0) + self.print("init_image shape: ", img.shape) + plot_img(img, self.result_path, fname="init_img") + + optimizer = PainterOptimizer(renderer, self.x_cfg.lr, self.x_cfg.width_lr, self.x_cfg.color_lr) + optimizer.init_optimizers() + + style_weight = 4 * (self.x_cfg.style_strength / 100) + self.print(f'style_weight: {style_weight}') + + total_step = self.x_cfg.num_iter + with tqdm(initial=self.step, total=total_step, disable=not self.accelerator.is_main_process) as pbar: + while self.step < total_step: + rendering = renderer.get_image(self.step).to(self.device) + + if self.make_video and (self.step % self.args.framefreq == 0 or self.step == total_step - 1): + plot_img(rendering, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + rendering_aug = self.drawing_augment(rendering) + + loss = torch.tensor(0., device=self.device) + + # do clip optimization + if self.step < 0.9 * total_step: + for n in range(self.x_cfg.num_aug): + loss -= torch.cosine_similarity(text_features, rendering_aug[n:n + 1], dim=1).mean() + + # do style optimization + # extract style features based on the approach from STROTSS [Kolkin et al., 2019]. 
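+ # note (assumed reading of the style term): feat_style was pre-sampled above from the
+ # style image (VGG16 hypercolumn features at 1000 random locations, repeated 5x); below,
+ # matching features of the current rendering are compared against it at the shuffled
+ # index pairs (xx, xy) via StyleLoss, i.e. a STROTSS-style relaxed feature matching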
+ feat_content = self.style_extractor(rendering) + + xx, xy = sample_indices(feat_content[0], feat_style) + + np.random.shuffle(xx) + np.random.shuffle(xy) + + L_style = self.style_loss.forward(feat_content, feat_content, feat_style, [xx, xy], 0) + + loss += L_style * style_weight + + pbar.set_description( + f"lr: {optimizer.get_lr():.3f}, " + f"L_train: {loss.item():.4f}, " + f"L_style: {L_style.item():.4f}" + ) + + # optimization + optimizer.zero_grad_() + loss.backward() + optimizer.step_() + + renderer.clip_curve_shape() + + if self.x_cfg.lr_schedule: + optimizer.update_lr(self.step) + + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + plot_couple(style_img, + rendering, + self.step, + prompt=prompt, + output_dir=self.png_logs_dir.as_posix(), + fname=f"iter{self.step}") + renderer.save_svg(self.svg_logs_dir.as_posix(), f"svg_iter{self.step}") + + self.step += 1 + pbar.update(1) + + plot_couple(style_img, + rendering, + self.step, + prompt=prompt, + output_dir=self.result_path.as_posix(), + fname=f"final_iter") + renderer.save_svg(self.result_path.as_posix(), "final_svg") + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "styleclipdraw_rendering.mp4").as_posix() + ]) + + self.close(msg="painterly rendering complete.") diff --git a/pytorch_svgrender/pipelines/VectorFusion_pipeline.py b/pytorch_svgrender/pipelines/VectorFusion_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..cec26c4b419b44f80f74e7ffeaa9fb246a71d9b1 --- /dev/null +++ b/pytorch_svgrender/pipelines/VectorFusion_pipeline.py @@ -0,0 +1,430 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: + +from PIL import Image +from typing import Union, AnyStr, List + +from omegaconf.listconfig import ListConfig +import diffusers +import numpy as np +from tqdm.auto import tqdm +import torch +from torchvision import transforms +import clip + +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.painter.vectorfusion import LSDSPipeline, LSDSSDXLPipeline, Painter, PainterOptimizer +from pytorch_svgrender.painter.vectorfusion import channel_saturation_penalty_loss as pixel_penalty_loss +from pytorch_svgrender.painter.live import xing_loss_fn +from pytorch_svgrender.plt import plot_img, plot_couple +from pytorch_svgrender.token2attn.ptp_utils import view_images +from pytorch_svgrender.diffusers_warp import init_StableDiffusion_pipeline, model2res + + +class VectorFusionPipeline(ModelState): + + def __init__(self, args): + assert args.x.style in ["iconography", "pixelart", "low-poly", "painting", "sketch", "ink"] + + logdir_ = f"sd{args.seed}-" \ + f"{'scratch' if args.x.skip_live else 'baseline'}" \ + f"-{args.x.model_id}" \ + f"-{args.x.style}" \ + f"-P{args.x.num_paths}" \ + f"{'-RePath' if args.x.path_reinit.use else ''}" + super().__init__(args, log_path_suffix=logdir_) + + # create log dir + self.png_logs_dir = self.result_path / "png_logs" + self.svg_logs_dir = self.result_path / "svg_logs" + self.ft_png_logs_dir = self.result_path / "ft_png_logs" + self.ft_svg_logs_dir = self.result_path / "ft_svg_logs" + self.sd_sample_dir = self.result_path / 'sd_samples' + self.reinit_dir = self.result_path / "reinit_logs" + + if self.accelerator.is_main_process: + self.png_logs_dir.mkdir(parents=True, exist_ok=True) + self.svg_logs_dir.mkdir(parents=True, exist_ok=True) + self.ft_png_logs_dir.mkdir(parents=True, exist_ok=True) + self.ft_svg_logs_dir.mkdir(parents=True, exist_ok=True) + self.sd_sample_dir.mkdir(parents=True, exist_ok=True) + self.reinit_dir.mkdir(parents=True, exist_ok=True) + + self.select_fpth = self.result_path / 'select_sample.png' + + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = self.result_path / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + + if self.x_cfg.model_id == "sdxl": + # default LSDSSDXLPipeline scheduler is EulerDiscreteScheduler + # when LSDSSDXLPipeline calls, scheduler.timesteps will change in step 4 + # which causes problem in sds add_noise() function + # because the random t may not in scheduler.timesteps + custom_pipeline = LSDSSDXLPipeline + custom_scheduler = diffusers.DPMSolverMultistepScheduler + elif self.x_cfg.model_id == 'sd21': + custom_pipeline = LSDSPipeline + custom_scheduler = diffusers.DDIMScheduler + else: # sd14, sd15 + custom_pipeline = LSDSPipeline + custom_scheduler = diffusers.PNDMScheduler + + self.diffusion = init_StableDiffusion_pipeline( + self.x_cfg.model_id, + custom_pipeline=custom_pipeline, + custom_scheduler=custom_scheduler, + device=self.device, + local_files_only=not args.diffuser.download, + force_download=args.diffuser.force_download, + resume_download=args.diffuser.resume_download, + ldm_speed_up=self.x_cfg.ldm_speed_up, + enable_xformers=self.x_cfg.enable_xformers, + gradient_checkpoint=self.x_cfg.gradient_checkpoint, + lora_path=self.x_cfg.lora_path + ) + + self.g_device = torch.Generator(device=self.device).manual_seed(args.seed) + + self.style = self.x_cfg.style + if self.style in ["pixelart", "low-poly"]: + self.x_cfg.path_schedule = 'list' + 
self.x_cfg.schedule_each = list([args.x.grid]) + + if self.style == "pixelart": + self.x_cfg.lr_stage_one.lr_schedule = False + self.x_cfg.lr_stage_two.lr_schedule = False + + def get_path_schedule(self, schedule_each: Union[int, List]): + if self.x_cfg.path_schedule == 'repeat': + return int(self.x_cfg.num_paths / schedule_each) * [schedule_each] + elif self.x_cfg.path_schedule == 'list': + assert isinstance(self.x_cfg.schedule_each, list) or \ + isinstance(self.x_cfg.schedule_each, ListConfig) + return schedule_each + else: + raise NotImplementedError + + def target_file_preprocess(self, tar_path: AnyStr): + process_comp = transforms.Compose([ + transforms.Resize(size=(self.x_cfg.image_size, self.x_cfg.image_size)), + transforms.ToTensor(), + transforms.Lambda(lambda t: t.unsqueeze(0)), + ]) + + tar_pil = Image.open(tar_path).convert("RGB") # open file + target_img = process_comp(tar_pil) # preprocess + target_img = target_img.to(self.device) + return target_img + + @torch.no_grad() + def rejection_sampling(self, img_caption: Union[AnyStr, List], diffusion_samples: List): + clip_model, preprocess = clip.load("ViT-B/32", device=self.device) + + text_input = clip.tokenize([img_caption]).to(self.device) + text_features = clip_model.encode_text(text_input) + text_features = text_features / text_features.norm(dim=-1, keepdim=True) + + clip_images = torch.stack([ + preprocess(sample) for sample in diffusion_samples] + ).to(self.device) + image_features = clip_model.encode_image(clip_images) + image_features = image_features / image_features.norm(dim=-1, keepdim=True) + + # clip score + similarity_scores = (text_features @ image_features.T).squeeze(0) + + selected_image_index = similarity_scores.argmax().item() + selected_image = diffusion_samples[selected_image_index] + return selected_image + + def diffusion_sampling(self, text_prompt: AnyStr): + """sampling K images""" + diffusion_samples = [] + for i in range(self.x_cfg.K): + height = width = model2res(self.x_cfg.model_id) + outputs = self.diffusion(prompt=[text_prompt], + negative_prompt=self.args.neg_prompt, + height=height, + width=width, + num_images_per_prompt=1, + num_inference_steps=self.x_cfg.num_inference_steps, + guidance_scale=self.x_cfg.guidance_scale, + generator=self.g_device) + outputs_np = [np.array(img) for img in outputs.images] + view_images(outputs_np, save_image=True, fp=self.sd_sample_dir / f'samples_{i}.png') + diffusion_samples.extend(outputs.images) + + self.print(f"num_generated_samples: {len(diffusion_samples)}, shape: {outputs_np[0].shape}") + + return diffusion_samples + + def LIVE_rendering(self, text_prompt: AnyStr): + select_fpth = self.select_fpth + # sampling K images + diffusion_samples = self.diffusion_sampling(text_prompt) + # rejection sampling + select_target = self.rejection_sampling(text_prompt, diffusion_samples) + select_target_pil = Image.fromarray(np.asarray(select_target)) # numpy to PIL + select_target_pil.save(select_fpth) + + # load target file + assert select_fpth.exists(), f"{select_fpth} is not exist!" 
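+ # the best-of-K diffusion sample (highest CLIP image-text similarity, chosen by
+ # rejection_sampling above) becomes the raster target that the LIVE stage fits below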
+ target_img = self.target_file_preprocess(select_fpth.as_posix()) + self.print(f"load target file from: {select_fpth.as_posix()}") + + # log path_schedule + path_schedule = self.get_path_schedule(self.x_cfg.schedule_each) + self.print(f"path_schedule: {path_schedule}") + + renderer = self.load_renderer() + # first init center + renderer.component_wise_path_init(target_img, pred=None, init_type=self.x_cfg.coord_init) + + optimizer_list = [PainterOptimizer(renderer, self.style, self.x_cfg.num_iter, + self.x_cfg.lr_stage_one, self.x_cfg.trainable_bg) + for _ in range(len(path_schedule))] + + pathn_record = [] + loss_weight_keep = 0 + + total_step = len(path_schedule) * self.x_cfg.num_iter + with tqdm(initial=self.step, total=total_step, disable=not self.accelerator.is_main_process) as pbar: + for path_idx, pathn in enumerate(path_schedule): + # record path + pathn_record.append(pathn) + # init graphic + img = renderer.init_image(stage=0, num_paths=pathn) + plot_img(img, self.result_path, fname=f"init_img_{path_idx}") + # rebuild optimizer + optimizer_list[path_idx].init_optimizers(pid_delta=int(path_idx * pathn)) + + pbar.write(f"=> adding {pathn} paths, n_path: {sum(pathn_record)}, " + f"n_points: {len(renderer.get_point_parameters())}, " + f"n_colors: {len(renderer.get_color_parameters())}") + + for t in range(self.x_cfg.num_iter): + raster_img = renderer.get_image(step=t).to(self.device) + + if self.make_video and (self.step % self.args.framefreq == 0 or self.step == total_step - 1): + plot_img(raster_img, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + if self.x_cfg.use_distance_weighted_loss and not (self.style == "pixelart"): + loss_weight = renderer.calc_distance_weight(loss_weight_keep) + + # reconstruction loss + if self.style == "pixelart": + loss_recon = torch.nn.functional.l1_loss(raster_img, target_img) + else: # UDF loss + loss_recon = ((raster_img - target_img) ** 2) + loss_recon = (loss_recon.sum(1) * loss_weight).mean() + + # Xing Loss for Self-Interaction Problem + loss_xing = torch.tensor(0.) 
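+ # xing_loss_fn (imported from painter.live) discourages self-intersecting Bezier paths;
+ # it is applied only to the closed-shape "iconography" style and scaled by xing_loss_weight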
+ if self.style == "iconography": + loss_xing = xing_loss_fn(renderer.get_point_parameters()) * self.x_cfg.xing_loss_weight + + # total loss + loss = loss_recon + loss_xing + + lr_str = "" + for k, lr in optimizer_list[path_idx].get_lr().items(): + lr_str += f"{k}_lr: {lr:.4f}, " + + pbar.set_description( + lr_str + + f"L_total: {loss.item():.4f}, " + f"L_recon: {loss_recon.item():.4f}, " + f"L_xing: {loss_xing.item()}" + ) + + # optimization + for i in range(path_idx + 1): + optimizer_list[i].zero_grad_() + + loss.backward() + + for i in range(path_idx + 1): + optimizer_list[i].step_() + + renderer.clip_curve_shape() + + if self.x_cfg.lr_stage_one.lr_schedule: + for i in range(path_idx + 1): + optimizer_list[i].update_lr() + + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + plot_couple(target_img, + raster_img, + self.step, + prompt=text_prompt, + output_dir=self.png_logs_dir.as_posix(), + fname=f"iter{self.step}") + renderer.pretty_save_svg(self.svg_logs_dir / f"svg_iter{self.step}.svg") + + self.step += 1 + pbar.update(1) + + # end a set of path optimization + if self.x_cfg.use_distance_weighted_loss and not (self.style == "pixelart"): + loss_weight_keep = loss_weight.detach().cpu().numpy() * 1 + # recalculate the coordinates for the new join path + renderer.component_wise_path_init(target_img, raster_img) + + # end LIVE + final_svg_fpth = self.result_path / "live_stage_one_final.svg" + renderer.pretty_save_svg(final_svg_fpth) + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "VF_rendering_stage1.mp4").as_posix() + ]) + + return target_img, final_svg_fpth + + def painterly_rendering(self, text_prompt: AnyStr): + # log prompts + self.print(f"prompt: {text_prompt}") + self.print(f"negative_prompt: {self.args.neg_prompt}\n") + + if self.x_cfg.skip_live: + target_img = torch.randn(1, 3, self.x_cfg.image_size, self.x_cfg.image_size) + final_svg_fpth = None + self.print("from scratch with Score Distillation Sampling...") + else: + # text-to-img-to-svg + target_img, final_svg_fpth = self.LIVE_rendering(text_prompt) + torch.cuda.empty_cache() + self.x_cfg.path_svg = final_svg_fpth + self.print("\nfine-tune SVG via Score Distillation Sampling...") + + renderer = self.load_renderer(path_svg=final_svg_fpth) + + if self.x_cfg.skip_live: + renderer.component_wise_path_init(target_img, pred=None, init_type='random') + + img = renderer.init_image(stage=0, num_paths=self.x_cfg.num_paths) + plot_img(img, self.result_path, fname=f"init_img_stage_two") + + optimizer = PainterOptimizer(renderer, self.style, + self.x_cfg.sds.num_iter, + self.x_cfg.lr_stage_two, + self.x_cfg.trainable_bg) + optimizer.init_optimizers() + + self.print(f"-> Painter point Params: {len(renderer.get_point_parameters())}") + self.print(f"-> Painter color Params: {len(renderer.get_color_parameters())}") + self.print(f"-> Painter width Params: {len(renderer.get_width_parameters())}") + + self.step = 0 # reset global step + total_step = self.x_cfg.sds.num_iter + path_reinit = self.x_cfg.path_reinit + + self.print(f"\ntotal sds optimization steps: {total_step}") + with tqdm(initial=self.step, total=total_step, disable=not self.accelerator.is_main_process) as pbar: + while self.step < total_step: + raster_img = renderer.get_image(step=self.step).to(self.device) + + if self.make_video and (self.step % self.args.framefreq == 0 or self.step == 
total_step - 1): + plot_img(raster_img, self.frame_log_dir, fname=f"iter{self.frame_idx}") + self.frame_idx += 1 + + L_sds, grad = self.diffusion.score_distillation_sampling( + raster_img, + im_size=self.x_cfg.sds.im_size, + prompt=[text_prompt], + negative_prompt=self.args.neg_prompt, + guidance_scale=self.x_cfg.sds.guidance_scale, + grad_scale=self.x_cfg.sds.grad_scale, + t_range=list(self.x_cfg.sds.t_range), + ) + # Xing Loss for Self-Interaction Problem + L_add = torch.tensor(0.) + if self.style == "iconography": + L_add = xing_loss_fn(renderer.get_point_parameters()) * self.x_cfg.xing_loss_weight + # pixel_penalty_loss to combat oversaturation + if self.style in ["pixelart", "low-poly"]: + L_add = pixel_penalty_loss(raster_img) * self.x_cfg.penalty_weight + + loss = L_sds + L_add + + # optimization + optimizer.zero_grad_() + loss.backward() + optimizer.step_() + + renderer.clip_curve_shape() + + # re-init paths + if self.step % path_reinit.freq == 0 and self.step < path_reinit.stop_step and self.step != 0: + renderer.reinitialize_paths(path_reinit.use, # on-off + path_reinit.opacity_threshold, + path_reinit.area_threshold, + fpath=self.reinit_dir / f"reinit-{self.step}.svg") + + # update lr + if self.x_cfg.lr_stage_two.lr_schedule: + optimizer.update_lr() + + lr_str = "" + for k, lr in optimizer.get_lr().items(): + lr_str += f"{k}_lr: {lr:.4f}, " + + pbar.set_description( + lr_str + + f"L_total: {loss.item():.4f}, " + f"L_add: {L_add.item():.4e}, " + f"sds: {grad.item():.5e}" + ) + + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + plot_couple(target_img, + raster_img, + self.step, + prompt=text_prompt, + output_dir=self.ft_png_logs_dir.as_posix(), + fname=f"iter{self.step}") + renderer.pretty_save_svg(self.ft_svg_logs_dir / f"svg_iter{self.step}.svg") + + self.step += 1 + pbar.update(1) + + final_svg_fpth = self.result_path / "finetune_final.svg" + renderer.pretty_save_svg(final_svg_fpth) + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "VF_rendering_stage2.mp4").as_posix() + ]) + + self.close(msg="painterly rendering complete.") + + def load_renderer(self, path_svg=None): + renderer = Painter(self.args.diffvg, + self.style, + self.x_cfg.num_segments, + self.x_cfg.segment_init, + self.x_cfg.radius, + self.x_cfg.image_size, + self.x_cfg.grid, + self.x_cfg.trainable_bg, + self.x_cfg.width, + path_svg=path_svg, + device=self.device) + return renderer diff --git a/pytorch_svgrender/pipelines/WordAsImage_pipeline.py b/pytorch_svgrender/pipelines/WordAsImage_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..689c9243b474b2a31e80a08077443c548d1674db --- /dev/null +++ b/pytorch_svgrender/pipelines/WordAsImage_pipeline.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: +from pathlib import Path + +from tqdm.auto import tqdm +import torch + +from pytorch_svgrender.libs.engine import ModelState +from pytorch_svgrender.painter.wordasimage import Painter, PainterOptimizer +from pytorch_svgrender.painter.wordasimage.losses import ToneLoss, ConformalLoss +from pytorch_svgrender.painter.vectorfusion import LSDSPipeline +from pytorch_svgrender.plt import plot_img, plot_couple +from pytorch_svgrender.diffusers_warp import init_StableDiffusion_pipeline +from pytorch_svgrender.svgtools import FONT_LIST + + +class WordAsImagePipeline(ModelState): + + def __init__(self, args): + # assert + assert args.x.optim_letter in args.x.word + assert Path(args.x.font_path).exists(), f"{args.x.font_path} is not exist." + assert args.x.font in FONT_LIST, f"{args.x.font} is not currently supported." + + # make logdir + logdir_ = f"sd{args.seed}" \ + f"-im{args.x.image_size}" \ + f"-{args.x.word}-{args.x.optim_letter}" + super().__init__(args, log_path_suffix=logdir_) + + # log dir + self.png_log_dir = self.result_path / "png_logs" + self.svg_log_dir = self.result_path / "svg_logs" + # font + self.font = self.x_cfg.font + self.font_path = self.x_cfg.font_path + self.optim_letter = self.x_cfg.optim_letter + # letter + self.letter = self.x_cfg.optim_letter + self.target_letter = self.result_path / f"{self.font}_{self.optim_letter}_scaled.svg" + # make log dir + if self.accelerator.is_main_process: + self.png_log_dir.mkdir(parents=True, exist_ok=True) + self.svg_log_dir.mkdir(parents=True, exist_ok=True) + + # make video log + self.make_video = self.args.mv + if self.make_video: + self.frame_idx = 0 + self.frame_log_dir = self.result_path / "frame_logs" + self.frame_log_dir.mkdir(parents=True, exist_ok=True) + + self.diffusion = init_StableDiffusion_pipeline( + self.x_cfg.model_id, + custom_pipeline=LSDSPipeline, + device=self.device, + local_files_only=not args.diffuser.download, + force_download=args.diffuser.force_download, + resume_download=args.diffuser.resume_download, + ldm_speed_up=self.x_cfg.ldm_speed_up, + enable_xformers=self.x_cfg.enable_xformers, + gradient_checkpoint=self.x_cfg.gradient_checkpoint, + lora_path=self.x_cfg.lora_path + ) + + self.g_device = torch.Generator(device=self.device).manual_seed(args.seed) + + def painterly_rendering(self, word, semantic_concept, optimized_letter): + prompt = semantic_concept + ". 
" + self.x_cfg.prompt_suffix + self.print(f"prompt: {prompt}") + + # load the optimized letter + renderer = Painter(self.font, canvas_size=self.x_cfg.image_size, device=self.device) + + # font to svg + self.print(f"font type: {self.font}\n") + renderer.preprocess_font(word, + optimized_letter, + self.x_cfg.level_of_cc, + self.font_path, + self.result_path.as_posix()) + + # init letter shape + img_init = renderer.init_shape(self.target_letter) + plot_img(img_init, self.result_path, fname="word_init") + + # save init letter + renderer.pretty_save_svg(self.result_path / "letter_init.svg") + init_letter = renderer.get_image() + + n_iter = self.x_cfg.num_iter + + # init optimizer and lr_schedular + optimizer = PainterOptimizer(renderer, n_iter, self.x_cfg.lr) + optimizer.init_optimizers() + + # init Tone loss + if self.x_cfg.tone_loss.use: + tone_loss = ToneLoss(self.x_cfg.tone_loss) + tone_loss.set_image_init(img_init) + + # init conformal loss + if self.x_cfg.conformal.use: + conformal_loss = ConformalLoss(renderer.get_point_parameters(), + renderer.shape_groups, + optimized_letter, self.device) + + with tqdm(initial=self.step, total=n_iter, disable=not self.accelerator.is_main_process) as pbar: + for i in range(n_iter): + + raster_img = renderer.get_image(step=i) + + if self.make_video and (i % self.args.framefreq == 0 or i == n_iter - 1): + plot_img(raster_img, self.frame_log_dir, fname=f"iter{self.step}") + + L_sds, grad = self.diffusion.score_distillation_sampling( + raster_img, + im_size=self.x_cfg.sds.im_size, + prompt=[prompt], + negative_prompt=self.args.neg_prompt, + guidance_scale=self.x_cfg.sds.guidance_scale, + grad_scale=self.x_cfg.sds.grad_scale, + t_range=list(self.x_cfg.sds.t_range), + ) + + loss = L_sds + + if self.x_cfg.tone_loss.use: + tone_loss_res = tone_loss(raster_img, step=i) + loss = loss + tone_loss_res + + if self.x_cfg.conformal.use: + loss_angles = conformal_loss() + loss_angles = self.x_cfg.conformal.angeles_w * loss_angles + loss = loss + loss_angles + + pbar.set_description( + f"n_params: {len(renderer.get_point_parameters())}, " + f"lr: {optimizer.get_lr():.4f}, " + f"L_total: {loss.item():.4f}, " + ) + + # optimization + optimizer.zero_grad_() + loss.backward() + optimizer.step_() + + if self.x_cfg.lr_schedule: + optimizer.update_lr() + + if self.step % self.args.save_step == 0 and self.accelerator.is_main_process: + plot_couple(init_letter, + raster_img, + self.step, + output_dir=self.png_log_dir.as_posix(), + fname=f"iter{self.step}", + prompt=prompt) + renderer.pretty_save_svg(self.svg_log_dir / f"svg_iter{self.step}.svg") + + self.step += 1 + pbar.update(1) + + # save final optimized letter + renderer.pretty_save_svg(self.result_path / "final_letter.svg") + + # combine word + renderer.combine_word(word, optimized_letter, self.font, self.result_path) + + if self.make_video: + from subprocess import call + call([ + "ffmpeg", + "-framerate", f"{self.args.framerate}", + "-i", (self.frame_log_dir / "iter%d.png").as_posix(), + "-vb", "20M", + (self.result_path / "wordasimg_rendering.mp4").as_posix() + ]) + + self.close(msg="painterly rendering complete.") diff --git a/pytorch_svgrender/pipelines/__init__.py b/pytorch_svgrender/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad761f2f5443eb41b15afc4116a66ecdfa9d918 --- /dev/null +++ b/pytorch_svgrender/pipelines/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: diff --git a/pytorch_svgrender/plt/__init__.py b/pytorch_svgrender/plt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5383680195eca3db97c2329894d0b111e6af889b --- /dev/null +++ b/pytorch_svgrender/plt/__init__.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Copyright (c) 2023, XiMing Xing. +# License: MPL-2.0 License + +from typing import AnyStr + +import matplotlib.pyplot as plt +import torch +from torchvision.utils import make_grid + + +def plot_couple(input_1: torch.Tensor, + input_2: torch.Tensor, + step: int, + output_dir: str, + fname: str, # file name + prompt: str = '', # text prompt as image tile + dpi: int = 300): + if input_1.shape != input_2.shape: + raise ValueError("inputs and outputs must have the same dimensions") + + plt.figure() + plt.subplot(1, 2, 1) # nrows=1, ncols=2, index=1 + grid = make_grid(input_1, normalize=True, pad_value=2) + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy() + plt.imshow(ndarr) + plt.axis("off") + plt.title("Input") + + plt.subplot(1, 2, 2) # nrows=1, ncols=2, index=2 + grid = make_grid(input_2, normalize=True, pad_value=2) + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy() + plt.imshow(ndarr) + plt.axis("off") + plt.title(f"Rendering - {step} steps") + + def insert_newline(string, point=9): + # split by blank + words = string.split() + if len(words) <= point: + return string + + word_chunks = [words[i:i + point] for i in range(0, len(words), point)] + new_string = "\n".join(" ".join(chunk) for chunk in word_chunks) + return new_string + + plt.suptitle(insert_newline(prompt), fontsize=10) + + plt.tight_layout() + plt.savefig(f"{output_dir}/{fname}.png", dpi=dpi) + plt.close() + + +def plot_img(inputs: torch.Tensor, + output_dir: AnyStr, + fname: str, # file name + dpi: int = 100): + assert torch.is_tensor(inputs), f"The input must be tensor type, but got {type(inputs)}" + + grid = make_grid(inputs, normalize=True, pad_value=2) + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy() + plt.imshow(ndarr) + plt.axis("off") + plt.tight_layout() + plt.savefig(f"{output_dir}/{fname}.png", dpi=dpi, bbox_inches='tight') + plt.close() + + +def plot_img_title(inputs: torch.Tensor, + title: str, + output_dir: AnyStr, + fname: str, # file name + dpi: int = 500): + assert torch.is_tensor(inputs), f"The input must be tensor type, but got {type(inputs)}" + + grid = make_grid(inputs, normalize=True, pad_value=2) + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to("cpu", torch.uint8).numpy() + plt.imshow(ndarr) + plt.axis("off") + plt.title(f"{title}") + plt.savefig(f"{output_dir}/{fname}.png", dpi=dpi) + plt.close() diff --git a/pytorch_svgrender/svgtools/__init__.py b/pytorch_svgrender/svgtools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ec9083cd96c4ef4df6ab9ae8ffefc2d71c6e9491 --- /dev/null +++ b/pytorch_svgrender/svgtools/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Copyright (c) 2023, XiMing Xing. 
+# License: MIT License + +from .tff import FONT_LIST +from .type import is_valid_svg +from .merge import merge_svg_files +from .process import delete_empty_path, add_def_tag + +__all__ = [ + 'is_valid_svg', + 'merge_svg_files', + 'FONT_LIST', + 'delete_empty_path', 'add_def_tag' +] diff --git a/pytorch_svgrender/svgtools/merge.py b/pytorch_svgrender/svgtools/merge.py new file mode 100644 index 0000000000000000000000000000000000000000..ef16353417a811cfa2d53f84641d8367e8c9b704 --- /dev/null +++ b/pytorch_svgrender/svgtools/merge.py @@ -0,0 +1,240 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: SVGDreamer - merge +# Copyright (c) 2023, XiMing Xing. +# License: MIT License +from typing import Tuple, AnyStr + +import omegaconf +from svgpathtools import svg2paths, wsvg + +from .type import is_valid_svg +from .shape import * + + +def merge_svg_files( + svg_path_1: AnyStr, + svg_path_2: AnyStr, + merge_type: str, + output_svg_path: AnyStr, + out_size: Tuple[int, int], # e.g.: (600, 600) +): + is_valid_svg(svg_path_1) + is_valid_svg(svg_path_2) + + # set merge ops + if merge_type.startswith('vert'): # Move up/down vertically + if '+' in merge_type: # move up + move_val = merge_type.split("+")[1] + move_val = int(move_val) + elif '-' in merge_type: # move down + move_val = merge_type.split("-")[1] + move_val = -int(move_val) + else: + raise NotImplemented(f'{merge_type} is invalid.') + + merge_svg_by_group(svg_path_1, svg_path_2, + cp_offset=(0, move_val), + svg_out=output_svg_path, out_size=out_size) + + elif merge_type.startswith('cp'): # Move all control points + if '+' in merge_type: + move_val = merge_type.split("+")[1] + move_val = int(move_val) + elif '-' in merge_type: + move_val = merge_type.split("-")[1] + move_val = -int(move_val) + else: + raise NotImplemented(f'{merge_type} is invalid.') + + merge_svg_by_cp(svg_path_1, svg_path_2, + p_offset=move_val, + svg_out=output_svg_path, out_size=out_size) + + elif merge_type == 'simple': # simply combine two SVG files + simple_merge(svg_path_1, svg_path_2, output_svg_path, out_size) + else: + raise NotImplemented(f'{str(merge_type)} is not support !') + + +def simple_merge(svg_path1, svg_path2, output_path, out_size): + # read svg to paths + paths1, attributes1 = svg2paths(svg_path1) + paths2, attributes2 = svg2paths(svg_path2) + # merge path and attributes + paths = paths1 + paths2 + attributes = attributes1 + attributes2 + # write merged svg + wsvg(paths, + attributes=attributes, + filename=output_path, + viewbox=f"0 0 {out_size[0]} {out_size[1]}") + + +def merge_svg_by_group( + svg_path_1: AnyStr, + svg_path_2: AnyStr, + cp_offset: Tuple[float, float], + svg_out: AnyStr, + out_size: Tuple[int, int], # e.g.: (600, 600) +): + # load svg_path_1 + tree1 = ET.parse(svg_path_1) + root1 = tree1.getroot() + # new group, and add paths form svg_path_1 + group1 = ET.Element('g') + for i, element in enumerate(root1.iter()): + element.tag = element.tag.split('}')[-1] + if element.tag in ['path', 'polygon']: + group1.append(element) + + # load svg_path_2 + tree2 = ET.parse(svg_path_2) + root2 = tree2.getroot() + # new group, and add paths form svg_path_2 + group2 = ET.Element('g') + for j, path in enumerate(root2.findall('.//{http://www.w3.org/2000/svg}path')): + # Remove the 'svg:' prefix from the tag name + path.tag = path.tag.split('}')[-1] + group2.append(path) + + # new svg + svg = ET.Element('svg', + xmlns="http://www.w3.org/2000/svg", + version='1.1', + width=str(out_size[0]), + height=str(out_size[1])) + + # control group2 + if 
'transform' in group2.attrib: + group2.attrib['transform'] += f' translate({cp_offset[0]}, {cp_offset[1]})' + else: + group2.attrib['transform'] = f'translate({cp_offset[0]}, {cp_offset[1]})' + # add two group + svg.append(group1) + svg.append(group2) + # write svg + tree = ET.ElementTree(svg) + tree.write(svg_out, encoding='utf-8', xml_declaration=True) + + +def merge_svg_by_cp( + svg_path_1: AnyStr, + svg_path_2: AnyStr, + p_offset: float, + svg_out: AnyStr, + out_size: Tuple[int, int], # e.g.: (600, 600) +): + # load svg_path_1 + tree1 = ET.parse(svg_path_1) + root1 = tree1.getroot() + # new group, and add paths form svg_path_1 + group1 = ET.Element('g') + for i, element in enumerate(root1.iter()): + element.tag = element.tag.split('}')[-1] + if element.tag in ['path', 'polygon']: + group1.append(element) + + # load svg_path_2 + tree2 = ET.parse(svg_path_2) + root2 = tree2.getroot() + + # new group, and add paths form svg_path_2 + group2 = ET.Element('g') + for j, path in enumerate(root2.findall('.//{http://www.w3.org/2000/svg}path')): + # remove the 'svg:' prefix from the tag name + path.tag = path.tag.split('}')[-1] + + d = path.get('d') + # parse paths + path_data = d.split() + new_path_data = [] + + for i in range(len(path_data)): + if path_data[i].replace('.', '').isdigit(): # get point coordinates + new_param = float(path_data[i]) + p_offset + new_path_data.append(str(new_param)) + else: + new_path_data.append(path_data[i]) + # update new d attrs + path.set('d', ' '.join(new_path_data)) + + group2.append(path) + + # new svg + svg = ET.Element('svg', + xmlns="http://www.w3.org/2000/svg", + version='1.1', + width=str(out_size[0]), + height=str(out_size[1])) + + # add two group + svg.append(group1) + svg.append(group2) + # write svg + tree = ET.ElementTree(svg) + tree.write(svg_out, encoding='utf-8', xml_declaration=True) + + +def merge_two_svgs_edit( + svg_path_1: AnyStr, + svg_path_2: AnyStr, + def_cfg: omegaconf.DictConfig, + p2_offset: Tuple[float, float], + svg_out: AnyStr, + out_size: Tuple[int, int], # e.g.: (600, 600) +): + # load svg_path_1 + tree1 = ET.parse(svg_path_1) + root1 = tree1.getroot() + # new group, and add paths form svg_path_1 + group1 = ET.Element('g') + for i, element in enumerate(root1.iter()): + element.tag = element.tag.split('}')[-1] + if element.tag in ['path', 'polygon']: + group1.append(element) + + # load svg_path_2 + tree2 = ET.parse(svg_path_2) + root2 = tree2.getroot() + + # new group, and add paths form svg_path_2 + group2 = ET.Element('g') + for j, path in enumerate(root2.findall('.//{http://www.w3.org/2000/svg}path')): + # remove the 'svg:' prefix from the tag name + path.tag = path.tag.split('}')[-1] + + d = path.get('d') + # parse paths + path_data = d.split() + new_path_data = [] + + d_idx = 0 # count digit + for i in range(len(path_data)): + if path_data[i].replace('.', '').isdigit(): # get point coordinates + d_idx += 1 + if d_idx % 2 == 1: # update y + new_param = float(path_data[i]) + (p2_offset[1]) + new_path_data.append(str(new_param)) + else: + new_path_data.append(path_data[i]) + else: + new_path_data.append(path_data[i]) + # update new d attrs + path.set('d', ' '.join(new_path_data)) + + group2.append(path) + + # new svg + svg = ET.Element('svg', + xmlns="http://www.w3.org/2000/svg", + version='1.1', + width=str(out_size[0]), + height=str(out_size[1])) + + # add two group + svg.append(group1) + svg.append(group2) + # write svg + tree = ET.ElementTree(svg) + tree.write(svg_out, encoding='utf-8', xml_declaration=True) diff --git 
a/pytorch_svgrender/svgtools/process.py b/pytorch_svgrender/svgtools/process.py new file mode 100644 index 0000000000000000000000000000000000000000..734b5a061918f1ef1ae016b4b4d387926c1c583c --- /dev/null +++ b/pytorch_svgrender/svgtools/process.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: process +# Copyright (c) 2023, XiMing Xing. +# License: MIT License + +import xml.etree.ElementTree as ET +from typing import Tuple + +import omegaconf + +from .shape import circle_tag, rect_tag +from .type import is_valid_svg + +def delete_empty_path(input_svg: str, output_svg: str): + is_valid_svg(input_svg) + + # read svg + tree = ET.parse(input_svg) + root = tree.getroot() + + group = ET.Element('g') + for i, element in enumerate(root.iter()): + element.tag = element.tag.split('}')[-1] + if element.tag == 'path': + if element.get('d') == 'C NaN NaN' or element.get('d') == '': + continue + group.append(element) + + # new svg + svg = ET.Element('svg', + xmlns="http://www.w3.org/2000/svg", + version='1.1', + width=root.get('width'), + height=root.get('height'), + viewBox=root.get('viewBox')) + svg.append(group) + tree = ET.ElementTree(svg) + tree.write(output_svg, encoding='utf-8', xml_declaration=True) + + +def add_clipPath2def(mounted_node: ET.Element, tag_name: str, attrs: omegaconf.DictConfig): + # add defs node + defs = ET.SubElement(mounted_node, 'defs') # parent=mounted_node, tag='defs' + if tag_name == 'none': + return None + # add clipPath node + id = 'def_clip' + _circleClip = ET.SubElement(defs, 'clipPath', id='def_clip') # parent=defs, tag='clipPath' + # add ops + if tag_name == 'circle_clip': + _circleClip.append( + circle_tag(cx=attrs.cx, cy=attrs.cy, r=attrs.r) + ) + elif tag_name == 'rect_clip': + _circleClip.append( + rect_tag(x=attrs.x, y=attrs.y, rx=attrs.rx, ry=attrs.ry, width=attrs.width, height=attrs.height) + ) + else: + raise NotImplementedError(f'{tag_name} is not exist!') + return id + + +def add_def_tag( + svg_path: str, + def_tag_plan: str, + out_size: Tuple[int, int], # e.g.: (600, 600) +): + is_valid_svg(svg_path) + + width, height = out_size[0], out_size[1] + + # set def tag + if def_tag_plan == 'circle_clip': + def_cfg = omegaconf.DictConfig({ + 'name': 'circle_clip', + 'attrs': {'cx': width // 2, 'cy': height // 2, 'r': int(height * 0.5)} + }) + elif def_tag_plan == 'rect_clip': + def_cfg = omegaconf.DictConfig({ + 'name': 'rect_clip', + 'attrs': {'x': 0, 'y': 0, 'rx': 70, 'ry': 70, 'width': width, 'height': height} + }) + else: + def_cfg = None + + # load SVG + tree = ET.parse(svg_path) + root = tree.getroot() + # new group, and add paths form svg_path_1 + group = ET.Element('g') + for i, element in enumerate(root.iter()): + element.tag = element.tag.split('}')[-1] + if element.tag in ['path', 'polygon']: + group.append(element) + + # new svg + svg = ET.Element('svg', + xmlns="http://www.w3.org/2000/svg", + version='1.1', + width=str(out_size[0]), + height=str(out_size[1])) + # add def tag to the SVG + clip_id = add_clipPath2def(mounted_node=svg, + tag_name=def_cfg.name, + attrs=def_cfg.attrs) + group.set('clip-path', f'url(#{clip_id})') + svg.append(group) + # write svg + tree = ET.ElementTree(svg) + tree.write(svg_path, encoding='utf-8', xml_declaration=True) diff --git a/pytorch_svgrender/svgtools/shape.py b/pytorch_svgrender/svgtools/shape.py new file mode 100644 index 0000000000000000000000000000000000000000..b7a8bc4f80a27b20a51ae50195ca37f93397b6a4 --- /dev/null +++ b/pytorch_svgrender/svgtools/shape.py @@ -0,0 +1,32 @@ +# -*- 
coding: utf-8 -*- +# Author: ximing +# Description: SVGDreamer - shape +# Copyright (c) 2023, XiMing Xing. +# License: MIT License + +import xml.etree.ElementTree as ET + + +def circle_tag(cx: float, cy: float, r: float, transform: str = None): + attrib = { + 'cx': f'{cx}', 'cy': f'{cy}', 'r': f'{r}' + } + if transform is not None: + attrib['transform'] = transform + _circle = ET.Element('circle', attrib) # tag, attrib + return _circle + + +def rect_tag( + x: float, y: float, rx: float, ry: float, + width: float = 600, height: float = 600, + transform: str = None +): + attrib = { + 'x': f'{x}', 'y': f'{y}', 'rx': f'{rx}', 'ry': f'{ry}', + 'width': f'{width}', 'height': f'{height}' + } + if transform is not None: + attrib['transform'] = transform + _rect = ET.Element('rect', attrib) # tag, attrib + return _rect diff --git a/pytorch_svgrender/svgtools/tff.py b/pytorch_svgrender/svgtools/tff.py new file mode 100644 index 0000000000000000000000000000000000000000..8073d8f8d12249e8d6fa542554ded8de09ed7218 --- /dev/null +++ b/pytorch_svgrender/svgtools/tff.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: +# Copyright (c) 2023, XiMing Xing. +# License: MIT License +FONT_LIST = [ + 'Bell-MT', + 'DeliusUnicase-Regular', + 'HobeauxRococeaux-Sherman', + 'IndieFlower-Regular', + 'JosefinSans-Light', + 'KaushanScript-Regular', + 'LuckiestGuy-Regular', + 'Noteworthy-Bold', + 'Quicksand', + 'Saira-Regular' +] diff --git a/pytorch_svgrender/svgtools/type.py b/pytorch_svgrender/svgtools/type.py new file mode 100644 index 0000000000000000000000000000000000000000..c22479c62f7909ebb7a09e42e7e764a089efe0d1 --- /dev/null +++ b/pytorch_svgrender/svgtools/type.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: SVGDreamer - type checking +# Copyright (c) 2023, XiMing Xing. +# License: MIT License + +from typing import AnyStr + +import xml.etree.ElementTree as ET + + +def is_valid_svg(file_path: AnyStr) -> bool: + try: + tree = ET.parse(file_path) + root = tree.getroot() + if root.tag.endswith('svg') and 'xmlns' in root.attrib: + return True + else: + return False + except ET.ParseError: + return False diff --git a/pytorch_svgrender/token2attn/__init__.py b/pytorch_svgrender/token2attn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad761f2f5443eb41b15afc4116a66ecdfa9d918 --- /dev/null +++ b/pytorch_svgrender/token2attn/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. +# Author: XiMing Xing +# Description: diff --git a/pytorch_svgrender/token2attn/attn_control.py b/pytorch_svgrender/token2attn/attn_control.py new file mode 100644 index 0000000000000000000000000000000000000000..8ec474ccab6ebea796bc7a9d2e91c52ad8d9ed57 --- /dev/null +++ b/pytorch_svgrender/token2attn/attn_control.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
+# Author: XiMing Xing +# Description: + +from abc import ABC, abstractmethod +from typing import Optional, Union, Tuple, List, Dict + +import torch +import torch.nn.functional as F + +from .ptp_utils import (get_word_inds, get_time_words_attention_alpha) +from .seq_aligner import (get_replacement_mapper, get_refinement_mapper) + + +class AttentionControl(ABC): + + def __init__(self): + self.cur_step = 0 + self.num_att_layers = -1 + self.cur_att_layer = 0 + + def step_callback(self, x_t): + return x_t + + def between_steps(self): + return + + @property + def num_uncond_att_layers(self): + return 0 + + @abstractmethod + def forward(self, attn, is_cross: bool, place_in_unet: str): + raise NotImplementedError + + def __call__(self, attn, is_cross: bool, place_in_unet: str): + if self.cur_att_layer >= self.num_uncond_att_layers: + h = attn.shape[0] + attn[h // 2:] = self.forward(attn[h // 2:], is_cross, place_in_unet) + self.cur_att_layer += 1 + if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers: + self.cur_att_layer = 0 + self.cur_step += 1 + self.between_steps() + return attn + + def reset(self): + self.cur_step = 0 + self.cur_att_layer = 0 + + +class EmptyControl(AttentionControl): + + def forward(self, attn, is_cross: bool, place_in_unet: str): + return attn + + +class AttentionStore(AttentionControl): + + def __init__(self): + super(AttentionStore, self).__init__() + self.step_store = self.get_empty_store() + self.attention_store = {} + + @staticmethod + def get_empty_store(): + return {"down_cross": [], "mid_cross": [], "up_cross": [], + "down_self": [], "mid_self": [], "up_self": []} + + def forward(self, attn, is_cross: bool, place_in_unet: str): + key = f"{place_in_unet}_{'cross' if is_cross else 'self'}" + if attn.shape[1] <= 32 ** 2: # avoid memory overhead + self.step_store[key].append(attn) + return attn + + def between_steps(self): + if len(self.attention_store) == 0: + self.attention_store = self.step_store + else: + for key in self.attention_store: + for i in range(len(self.attention_store[key])): + self.attention_store[key][i] += self.step_store[key][i] + self.step_store = self.get_empty_store() + + def get_average_attention(self): + average_attention = { + key: [item / self.cur_step for item in self.attention_store[key]] + for key in self.attention_store + } + return average_attention + + def reset(self): + super(AttentionStore, self).reset() + self.step_store = self.get_empty_store() + self.attention_store = {} + + +class LocalBlend: + + def __init__(self, + prompts: List[str], + words: [List[List[str]]], + tokenizer, + device, + threshold=.3, + max_num_words=77): + self.max_num_words = max_num_words + + alpha_layers = torch.zeros(len(prompts), 1, 1, 1, 1, self.max_num_words) + for i, (prompt, words_) in enumerate(zip(prompts, words)): + if type(words_) is str: + words_ = [words_] + for word in words_: + ind = get_word_inds(prompt, word, tokenizer) + alpha_layers[i, :, :, :, :, ind] = 1 + self.alpha_layers = alpha_layers.to(device) + self.threshold = threshold + + def __call__(self, x_t, attention_store): + k = 1 + maps = attention_store["down_cross"][2:4] + attention_store["up_cross"][:3] + maps = [item.reshape(self.alpha_layers.shape[0], -1, 1, 16, 16, self.max_num_words) for item in maps] + maps = torch.cat(maps, dim=1) + maps = (maps * self.alpha_layers).sum(-1).mean(1) + mask = F.max_pool2d(maps, (k * 2 + 1, k * 2 + 1), (1, 1), padding=(k, k)) + mask = F.interpolate(mask, size=(x_t.shape[2:])) + mask = mask / mask.max(2, keepdims=True)[0].max(3, 
keepdims=True)[0] + mask = mask.gt(self.threshold) + mask = (mask[:1] + mask[1:]).float() + x_t = x_t[:1] + mask * (x_t - x_t[:1]) + return x_t + + +class AttentionControlEdit(AttentionStore, ABC): + + def __init__(self, + prompts, + num_steps: int, + cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]], + self_replace_steps: Union[float, Tuple[float, float]], + local_blend: Optional[LocalBlend], + tokenizer, + device): + super(AttentionControlEdit, self).__init__() + self.tokenizer = tokenizer + self.device = device + + self.batch_size = len(prompts) + self.cross_replace_alpha = get_time_words_attention_alpha(prompts, num_steps, cross_replace_steps, + self.tokenizer).to(self.device) + if type(self_replace_steps) is float: + self_replace_steps = 0, self_replace_steps + self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1]) + self.local_blend = local_blend # define outside + + def step_callback(self, x_t): + if self.local_blend is not None: + x_t = self.local_blend(x_t, self.attention_store) + return x_t + + def replace_self_attention(self, attn_base, att_replace): + if att_replace.shape[2] <= 16 ** 2: + return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape) + else: + return att_replace + + @abstractmethod + def replace_cross_attention(self, attn_base, att_replace): + raise NotImplementedError + + def forward(self, attn, is_cross: bool, place_in_unet: str): + super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet) + # FIXME not replace correctly + if is_cross or (self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]): + h = attn.shape[0] // (self.batch_size) + attn = attn.reshape(self.batch_size, h, *attn.shape[1:]) + attn_base, attn_repalce = attn[0], attn[1:] + if is_cross: + alpha_words = self.cross_replace_alpha[self.cur_step] + attn_repalce_new = self.replace_cross_attention(attn_base, attn_repalce) * alpha_words + ( + 1 - alpha_words) * attn_repalce + attn[1:] = attn_repalce_new + else: + attn[1:] = self.replace_self_attention(attn_base, attn_repalce) + attn = attn.reshape(self.batch_size * h, *attn.shape[2:]) + return attn + + +class AttentionReplace(AttentionControlEdit): + + def __init__(self, + prompts, + num_steps: int, + cross_replace_steps: float, + self_replace_steps: float, + local_blend: Optional[LocalBlend] = None, + tokenizer=None, + device=None): + super(AttentionReplace, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, + local_blend, tokenizer, device) + self.mapper = get_replacement_mapper(prompts, self.tokenizer).to(self.device) + + def replace_cross_attention(self, attn_base, att_replace): + return torch.einsum('hpw,bwn->bhpn', attn_base, self.mapper) + + +class AttentionRefine(AttentionControlEdit): + + def __init__(self, + prompts, + num_steps: int, + cross_replace_steps: float, + self_replace_steps: float, + local_blend: Optional[LocalBlend] = None, + tokenizer=None, + device=None): + super(AttentionRefine, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, + local_blend, tokenizer, device) + self.mapper, alphas = get_refinement_mapper(prompts, self.tokenizer) + self.mapper, alphas = self.mapper.to(self.device), alphas.to(self.device) + self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1]) + + def replace_cross_attention(self, attn_base, att_replace): + attn_base_replace = attn_base[:, :, self.mapper].permute(2, 0, 1, 3) + attn_replace = attn_base_replace * 
self.alphas + att_replace * (1 - self.alphas) + return attn_replace + + +class AttentionReweight(AttentionControlEdit): + + def __init__(self, + prompts, + num_steps: int, + cross_replace_steps: float, + self_replace_steps: float, + equalizer, + local_blend: Optional[LocalBlend] = None, + controller: Optional[AttentionControlEdit] = None, + tokenizer=None, + device=None): + super(AttentionReweight, self).__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, + local_blend, tokenizer, device) + self.equalizer = equalizer.to(self.device) + self.prev_controller = controller + + def replace_cross_attention(self, attn_base, att_replace): + if self.prev_controller is not None: + attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace) + attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :] + return attn_replace + + +def get_equalizer(tokenizer, text: str, + word_select: Union[int, Tuple[int, ...]], + values: Union[List[float], Tuple[float, ...]]): + if type(word_select) is int or type(word_select) is str: + word_select = (word_select,) + equalizer = torch.ones(len(values), 77) + values = torch.tensor(values, dtype=torch.float32) + for word in word_select: + inds = get_word_inds(text, word, tokenizer) + equalizer[:, inds] = values + return equalizer diff --git a/pytorch_svgrender/token2attn/ptp_utils.py b/pytorch_svgrender/token2attn/ptp_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..70b9847f2b6f965b37513ed342f88d3a646fecd4 --- /dev/null +++ b/pytorch_svgrender/token2attn/ptp_utils.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +import pathlib +from typing import Union, Optional, List, Tuple, Dict, Text, BinaryIO +from PIL import Image + +import torch +import cv2 +import numpy as np +import matplotlib.pyplot as plt + +from .seq_aligner import get_word_inds + + +def text_under_image(image: np.ndarray, + text: str, + text_color: Tuple[int, int, int] = (0, 0, 0)) -> np.ndarray: + h, w, c = image.shape + offset = int(h * .2) + img = np.ones((h + offset, w, c), dtype=np.uint8) * 255 + font = cv2.FONT_HERSHEY_SIMPLEX + img[:h] = image + textsize = cv2.getTextSize(text, font, 1, 2)[0] + text_x, text_y = (w - textsize[0]) // 2, h + offset - textsize[1] // 2 + cv2.putText(img, text, (text_x, text_y), font, 1, text_color, 2) + return img + + +def view_images( + images: Union[np.ndarray, List[np.ndarray]], + num_rows: int = 1, + offset_ratio: float = 0.02, + save_image: bool = False, + fp: Union[Text, pathlib.Path, BinaryIO] = None, +) -> np.ndarray: + if save_image: + assert fp is not None + + if isinstance(images, list): + images = np.concatenate(images, axis=0) + + if isinstance(images, np.ndarray) and images.ndim == 4: + num_empty = images.shape[0] % num_rows + else: + images = [images] if not isinstance(images, list) else images + num_empty = len(images) % num_rows + + empty_images = np.ones(images[0].shape, dtype=np.uint8) * 255 + images = [image.astype(np.uint8) for image in images] + [empty_images] * num_empty + num_items = len(images) + + # Calculate the composite image + h, w, c = images[0].shape + offset = int(h * offset_ratio) + num_cols = int(np.ceil(num_items / num_rows)) # count the number of columns + image_h = h * num_rows + offset * (num_rows - 1) + image_w = w * num_cols + offset * (num_cols - 1) + assert image_h > 0, "Invalid image height: {} (num_rows={}, offset_ratio={}, num_items={})".format( + image_h, num_rows, offset_ratio, num_items) + assert image_w > 0, "Invalid image width: {} 
(num_cols={}, offset_ratio={}, num_items={})".format( + image_w, num_cols, offset_ratio, num_items) + image_ = np.ones((image_h, image_w, 3), dtype=np.uint8) * 255 + + # Ensure that the last row is filled with empty images if necessary + if len(images) % num_cols > 0: + empty_images = np.ones(images[0].shape, dtype=np.uint8) * 255 + num_empty = num_cols - len(images) % num_cols + images += [empty_images] * num_empty + + for i in range(num_rows): + for j in range(num_cols): + k = i * num_cols + j + if k >= num_items: + break + image_[i * (h + offset): i * (h + offset) + h, j * (w + offset): j * (w + offset) + w] = images[k] + + pil_img = Image.fromarray(image_) + if save_image: + pil_img.save(fp) + return pil_img + + +def update_alpha_time_word(alpha, + bounds: Union[float, Tuple[float, float]], + prompt_ind: int, + word_inds: Optional[torch.Tensor] = None): + if isinstance(bounds, float): + bounds = 0, bounds + start, end = int(bounds[0] * alpha.shape[0]), int(bounds[1] * alpha.shape[0]) + if word_inds is None: + word_inds = torch.arange(alpha.shape[2]) + alpha[: start, prompt_ind, word_inds] = 0 + alpha[start: end, prompt_ind, word_inds] = 1 + alpha[end:, prompt_ind, word_inds] = 0 + return alpha + + +def get_time_words_attention_alpha(prompts, num_steps, + cross_replace_steps: Union[float, Dict[str, Tuple[float, float]]], + tokenizer, + max_num_words=77): + if type(cross_replace_steps) is not dict: + cross_replace_steps = {"default_": cross_replace_steps} + if "default_" not in cross_replace_steps: + cross_replace_steps["default_"] = (0., 1.) + alpha_time_words = torch.zeros(num_steps + 1, len(prompts) - 1, max_num_words) + for i in range(len(prompts) - 1): + alpha_time_words = update_alpha_time_word(alpha_time_words, cross_replace_steps["default_"], + i) + for key, item in cross_replace_steps.items(): + if key != "default_": + inds = [get_word_inds(prompts[i], key, tokenizer) for i in range(1, len(prompts))] + for i, ind in enumerate(inds): + if len(ind) > 0: + alpha_time_words = update_alpha_time_word(alpha_time_words, item, i, ind) + alpha_time_words = alpha_time_words.reshape(num_steps + 1, len(prompts) - 1, 1, 1, max_num_words) + return alpha_time_words diff --git a/pytorch_svgrender/token2attn/seq_aligner.py b/pytorch_svgrender/token2attn/seq_aligner.py new file mode 100644 index 0000000000000000000000000000000000000000..d534d8ae1b6618604c619d56250293f66c0430f5 --- /dev/null +++ b/pytorch_svgrender/token2attn/seq_aligner.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +import torch +import numpy as np + + +class ScoreParams: + + def __init__(self, gap, match, mismatch): + self.gap = gap + self.match = match + self.mismatch = mismatch + + def mis_match_char(self, x, y): + if x != y: + return self.mismatch + else: + return self.match + + +def get_matrix(size_x, size_y, gap): + matrix = [] + for i in range(len(size_x) + 1): + sub_matrix = [] + for j in range(len(size_y) + 1): + sub_matrix.append(0) + matrix.append(sub_matrix) + for j in range(1, len(size_y) + 1): + matrix[0][j] = j * gap + for i in range(1, len(size_x) + 1): + matrix[i][0] = i * gap + return matrix + + +def get_matrix(size_x, size_y, gap): + matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32) + matrix[0, 1:] = (np.arange(size_y) + 1) * gap + matrix[1:, 0] = (np.arange(size_x) + 1) * gap + return matrix + + +def get_traceback_matrix(size_x, size_y): + matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32) + matrix[0, 1:] = 1 + matrix[1:, 0] = 2 + matrix[0, 0] = 4 + return matrix + + +def global_align(x, y, 
score): + matrix = get_matrix(len(x), len(y), score.gap) + trace_back = get_traceback_matrix(len(x), len(y)) + for i in range(1, len(x) + 1): + for j in range(1, len(y) + 1): + left = matrix[i, j - 1] + score.gap + up = matrix[i - 1, j] + score.gap + diag = matrix[i - 1, j - 1] + score.mis_match_char(x[i - 1], y[j - 1]) + matrix[i, j] = max(left, up, diag) + if matrix[i, j] == left: + trace_back[i, j] = 1 + elif matrix[i, j] == up: + trace_back[i, j] = 2 + else: + trace_back[i, j] = 3 + return matrix, trace_back + + +def get_aligned_sequences(x, y, trace_back): + x_seq = [] + y_seq = [] + i = len(x) + j = len(y) + mapper_y_to_x = [] + while i > 0 or j > 0: + if trace_back[i, j] == 3: + x_seq.append(x[i - 1]) + y_seq.append(y[j - 1]) + i = i - 1 + j = j - 1 + mapper_y_to_x.append((j, i)) + elif trace_back[i][j] == 1: + x_seq.append('-') + y_seq.append(y[j - 1]) + j = j - 1 + mapper_y_to_x.append((j, -1)) + elif trace_back[i][j] == 2: + x_seq.append(x[i - 1]) + y_seq.append('-') + i = i - 1 + elif trace_back[i][j] == 4: + break + mapper_y_to_x.reverse() + return x_seq, y_seq, torch.tensor(mapper_y_to_x, dtype=torch.int64) + + +def get_mapper(x: str, y: str, tokenizer, max_len=77): + x_seq = tokenizer.encode(x) + y_seq = tokenizer.encode(y) + score = ScoreParams(0, 1, -1) + matrix, trace_back = global_align(x_seq, y_seq, score) + mapper_base = get_aligned_sequences(x_seq, y_seq, trace_back)[-1] + alphas = torch.ones(max_len) + alphas[: mapper_base.shape[0]] = mapper_base[:, 1].ne(-1).float() + mapper = torch.zeros(max_len, dtype=torch.int64) + mapper[:mapper_base.shape[0]] = mapper_base[:, 1] + mapper[mapper_base.shape[0]:] = len(y_seq) + torch.arange(max_len - len(y_seq)) + return mapper, alphas + + +def get_refinement_mapper(prompts, tokenizer, max_len=77): + x_seq = prompts[0] + mappers, alphas = [], [] + for i in range(1, len(prompts)): + mapper, alpha = get_mapper(x_seq, prompts[i], tokenizer, max_len) + mappers.append(mapper) + alphas.append(alpha) + return torch.stack(mappers), torch.stack(alphas) + + +def get_word_inds(text: str, word_place: int, tokenizer): + split_text = text.split(" ") + if type(word_place) is str: + word_place = [i for i, word in enumerate(split_text) if word_place == word] + elif type(word_place) is int: + word_place = [word_place] + out = [] + if len(word_place) > 0: + words_encode = [tokenizer.decode([item]).strip("#") for item in tokenizer.encode(text)][1:-1] + cur_len, ptr = 0, 0 + + for i in range(len(words_encode)): + cur_len += len(words_encode[i]) + if ptr in word_place: + out.append(i + 1) + if cur_len >= len(split_text[ptr]): + ptr += 1 + cur_len = 0 + return np.array(out) + + +def get_replacement_mapper_(x: str, y: str, tokenizer, max_len=77): + words_x = x.split(' ') + words_y = y.split(' ') + if len(words_x) != len(words_y): + raise ValueError(f"attention replacement edit can only be applied on prompts with the same length" + f" but prompt A has {len(words_x)} words and prompt B has {len(words_y)} words.") + inds_replace = [i for i in range(len(words_y)) if words_y[i] != words_x[i]] + inds_source = [get_word_inds(x, i, tokenizer) for i in inds_replace] + inds_target = [get_word_inds(y, i, tokenizer) for i in inds_replace] + mapper = np.zeros((max_len, max_len)) + i = j = 0 + cur_inds = 0 + while i < max_len and j < max_len: + if cur_inds < len(inds_source) and inds_source[cur_inds][0] == i: + inds_source_, inds_target_ = inds_source[cur_inds], inds_target[cur_inds] + if len(inds_source_) == len(inds_target_): + mapper[inds_source_, inds_target_] = 1 
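+ # source and target words tokenize to different lengths: the else-branch below
+ # spreads the mapping weight uniformly over the target sub-tokens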
+ else: + ratio = 1 / len(inds_target_) + for i_t in inds_target_: + mapper[inds_source_, i_t] = ratio + cur_inds += 1 + i += len(inds_source_) + j += len(inds_target_) + elif cur_inds < len(inds_source): + mapper[i, j] = 1 + i += 1 + j += 1 + else: + mapper[j, j] = 1 + i += 1 + j += 1 + + return torch.from_numpy(mapper).float() + + +def get_replacement_mapper(prompts, tokenizer, max_len=77): + x_seq = prompts[0] + mappers = [] + for i in range(1, len(prompts)): + mapper = get_replacement_mapper_(x_seq, prompts[i], tokenizer, max_len) + mappers.append(mapper) + return torch.stack(mappers) diff --git a/pytorch_svgrender/utils/__init__.py b/pytorch_svgrender/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..06714075cc8760fab954a1f1b6099c81a8edf3b7 --- /dev/null +++ b/pytorch_svgrender/utils/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: __init__.py +# Copyright (c) 2023, XiMing Xing. +# License: MPL-2.0 License + +from .misc import render_batch_wrap, get_seed_range +from .color_attrs import get_rgb_from_color diff --git a/pytorch_svgrender/utils/color_attrs.py b/pytorch_svgrender/utils/color_attrs.py new file mode 100644 index 0000000000000000000000000000000000000000..f4ca1c1df8c6e86981141e07c17d79c25feb6612 --- /dev/null +++ b/pytorch_svgrender/utils/color_attrs.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: shape_group +# Copyright (c) 2023, XiMing Xing. +# License: MIT License + +from typing import Tuple + +import torch +from matplotlib import colors + + +def init_tensor_with_rgb( + rgb: Tuple[float, float, float], + b: int, + w: int, + h: int, + norm: bool = False +): + """ + Initializes a PyTorch tensor with the specified RGB values. The tensor has shape (b, 3, w, h). + + Args: + rgb: RGB values, shape (3,) + b: Batch size + w: Width + h: Height + norm: normalize the tensor to range [0, 1] + + Examples: + >>> rgb = (0.5, 0.2, 0.1) # Specify RGB values + >>> tensor = init_tensor_with_rgb(rgb, 1, 100, 100, norm=False) # Initialize tensor + + Returns: + Initialized tensor + """ + + # Convert RGB values to tensor + rgb = torch.tensor(rgb, dtype=torch.float) + + # Create tensor + tensor = torch.zeros((b, 3, w, h), dtype=torch.float) + + # Assign RGB values to tensor + tensor[:, 0] = rgb[0] + tensor[:, 1] = rgb[1] + tensor[:, 2] = rgb[2] + + if norm: + tensor = tensor / 255. + + return tensor + + +def init_tensor_with_color( + color: str, + b: int, + w: int, + h: int, + norm: bool = True +): + """ + Initializes a PyTorch tensor with the specified RGB values. The tensor has shape (b, 3, w, h). 
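# Usage sketch for get_replacement_mapper_ above: a same-length, single-word swap yields a
# near-identity (max_len, max_len) matrix, while a word that re-tokenizes into several pieces
# has its weight spread as 1/len(target). The whitespace tokenizer below is a hypothetical
# stand-in for the CLIP tokenizer, and the import path is an assumption, not taken from the diff.
from seq_aligner import get_replacement_mapper_  # hypothetical module name for the file above

class ToyTokenizer:
    # one token per word plus BOS/EOS ids, matching the interface get_word_inds expects
    def __init__(self):
        self.vocab = {}

    def encode(self, text):
        return [0] + [self.vocab.setdefault(w, len(self.vocab) + 2) for w in text.split(" ")] + [1]

    def decode(self, ids):
        inv = {v: k for k, v in self.vocab.items()}
        return " ".join(inv.get(i, "") for i in ids)

mapper = get_replacement_mapper_("a cat sitting", "a dog sitting", ToyTokenizer(), max_len=6)
print(mapper[:4, :4])  # identity here, because "cat" and "dog" each map to a single token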
+ + Args: + color: + b: Batch size + w: Width + h: Height + norm: normalize the tensor to range [0, 1] + + Examples: + >>> color = '#B0A695' # Specify RGB values + >>> tensor = init_tensor_with_rgb(color, 1, 100, 100) # Initialize tensor + + Returns: + Initialized tensor + """ + + rgb = get_rgb_from_color(color) + + # Convert RGB values to tensor + rgb = torch.tensor(rgb, dtype=torch.float) + + # Create tensor + tensor = torch.zeros((b, 3, w, h), dtype=torch.float) + + # Assign RGB values to tensor + tensor[:, 0] = rgb[0] + tensor[:, 1] = rgb[1] + tensor[:, 2] = rgb[2] + + return tensor + + +def hex_to_rgb(hex_code): + r = int(hex_code[0:2], 16) + g = int(hex_code[2:4], 16) + b = int(hex_code[4:6], 16) + return (r, g, b) + + +def get_rgb_from_color(color: str): + # get the corresponding RGB value based on the color + if color.startswith('#'): + color = color.split('#')[1] + rgb = hex_to_rgb(color) + rgb = [c / 255. for c in rgb] # to [0, 1] + elif color in colors.cnames: + rgb = colors.to_rgb(color) + else: + rgb = color + return rgb + + +if __name__ == "__main__": + color = '#B0A695' + + rgb = get_rgb_from_color(color) + + print(rgb) diff --git a/pytorch_svgrender/utils/inpaint_util.py b/pytorch_svgrender/utils/inpaint_util.py new file mode 100644 index 0000000000000000000000000000000000000000..a1bbe52967aaa28a6cd697aa7b725b5dccf74414 --- /dev/null +++ b/pytorch_svgrender/utils/inpaint_util.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: inpaint_util +# Copyright (c) 2023, XiMing Xing. +# License: MIT License + +import os +import pathlib + +import cv2 +import numpy as np +from omegaconf import OmegaConf +from tqdm import trange +import torch +from torch.utils.data._utils.collate import default_collate + + +def apply_lama_inpaint(predict_config, device): + # local import + from lama.saicinpainting.evaluation.utils import move_to_device + from lama.saicinpainting.evaluation.refinement import refine_predict + from lama.saicinpainting.training.data.datasets import make_default_val_dataset + from lama.saicinpainting.training.trainers import load_checkpoint + + try: + train_config_path = pathlib.Path(predict_config.model.path) / 'config.yaml' + train_config = OmegaConf.load(train_config_path) + + train_config.training_model.predict_only = True + train_config.visualizer.kind = 'noop' + + out_ext = predict_config.get('out_ext', '.png') + + checkpoint_path = os.path.join( + predict_config.model.path, 'models', predict_config.model.checkpoint + ) + model = load_checkpoint(train_config, checkpoint_path, strict=False, map_location='cpu') + model.freeze() + + if not predict_config.get('refine', False): + model.to(device) + + if not predict_config.indir.endswith('/'): + predict_config.indir += '/' + + dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset) + for img_i in trange(len(dataset)): + mask_fname = dataset.mask_filenames[img_i] + cur_out_fname = os.path.join( + predict_config.outdir, + os.path.splitext(mask_fname[len(predict_config.indir):])[0] + out_ext + ) + os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True) + batch = default_collate([dataset[img_i]]) + + if predict_config.get('refine', False): + assert 'unpad_to_size' in batch, "Unpadded size is required for the refinement" + # image unpadding is taken care of in the refiner, so that output image + # is same size as the input image + cur_res = refine_predict(batch, model, **predict_config.refiner) + cur_res = cur_res[0].permute(1, 2, 0).detach().cpu().numpy() + else: + with 
torch.no_grad(): + batch = move_to_device(batch, device) + batch['mask'] = (batch['mask'] > 0) * 1 + batch = model(batch) + cur_res = batch[predict_config.out_key][0].permute(1, 2, 0).detach().cpu().numpy() + unpad_to_size = batch.get('unpad_to_size', None) + if unpad_to_size is not None: + orig_height, orig_width = unpad_to_size + cur_res = cur_res[:orig_height, :orig_width] + + cur_res = np.clip(cur_res * 255, 0, 255).astype('uint8') + cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR) + cv2.imwrite(cur_out_fname, cur_res) + + except KeyboardInterrupt: + print('Interrupted by user') + except Exception as ex: + print(f'Prediction failed due to:') + print(f'{ex}') + import sys + sys.exit(1) diff --git a/pytorch_svgrender/utils/misc.py b/pytorch_svgrender/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..132f64681f4e4d4364ea1118b4e7a7f9f04e1eaa --- /dev/null +++ b/pytorch_svgrender/utils/misc.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: misc +# Copyright (c) 2023, XiMing Xing. +# License: MPL-2.0 License + +from datetime import datetime +import random +import pathlib +from typing import Any, List, Dict, Union + +import omegaconf + +"""Add Type""" +AnyPath = Union[str, pathlib.Path, 'os.PathLike'] +AnyList = Union[omegaconf.ListConfig, List] +AnyDict = Union[omegaconf.DictConfig, Dict] + + +def render_batch_wrap(cfg: omegaconf.DictConfig, + seed_range: List, + pipeline: Any, + **pipe_args): + start_time = datetime.now() + for idx, seed in enumerate(seed_range): + cfg.seed = seed # update seed + print(f"\n-> [{idx}/{len(seed_range)}], " + f"current seed: {seed}, " + f"current time: {datetime.now() - start_time}\n") + pipe = pipeline(cfg) + pipe.painterly_rendering(**pipe_args) + + +def get_seed_range(srange: AnyList): + # random sampling without specifying a range + start_, end_ = 1, 1000000 + if srange is not None: # specify range sequential sampling + seed_range_ = list(srange) + assert len(seed_range_) == 2 and int(seed_range_[1]) > int(seed_range_[0]) + start_, end_ = int(seed_range_[0]), int(seed_range_[1]) + seed_range = [i for i in range(start_, end_)] + else: + # a list of lengths 1000 sampled from the range start_ to end_ (e.g.: [1, 1000000]) + numbers = list(range(start_, end_)) + seed_range = random.sample(numbers, k=1000) + return seed_range diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..40a244e6cdd811bffc95ece0201accded299fbb7 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,51 @@ +# --index-url https://download.pytorch.org/whl/cu118 +torch +torchvision +torchaudio + +cmake + +hydra-core +omegaconf +freetype-py +shapely +svgutils +opencv-python +scikit-image +matplotlib +visdom +wandb +BeautifulSoup4 +triton +numba +numpy +scipy +scikit-fmm +einops +timm +fairscale==0.4.13 +accelerate +transformers +safetensors +datasets + +easydict +scikit-learn +pytorch_lightning +webdataset +albumentations==0.5.2 +kornia==0.5.0 +wldhx.yadisk-direct + +ftfy +regex +tqdm +git+https://github.com/openai/CLIP.git + +diffusers==0.20.2 +# xformers + +svgwrite +svgpathtools +cssutils +torch-tools diff --git a/script/download_u2net.sh b/script/download_u2net.sh new file mode 100644 index 0000000000000000000000000000000000000000..bedfc3fd5fbdaf3780ec4ed9491798efddf58d80 --- /dev/null +++ b/script/download_u2net.sh @@ -0,0 +1,8 @@ +#!/bin/bash +eval "$(conda shell.bash hook)" + +cd checkpoint +curl -O -L 
https://huggingface.co/xingxm/PyTorch-SVGRender-models/resolve/main/u2net.zip +unzip u2net.zip + +echo "U^2Net download success" diff --git a/script/install.sh b/script/install.sh new file mode 100644 index 0000000000000000000000000000000000000000..4ce71d596593efb95e139b0c05f9223ce58261e2 --- /dev/null +++ b/script/install.sh @@ -0,0 +1,58 @@ +#!/bin/bash +eval "$(conda shell.bash hook)" + +conda create --name svgrender python=3.10 +conda activate svgrender + +echo "The conda environment was successfully created" + +conda install pytorch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1 cudatoolkit=11.3 -c pytorch + +echo "Pytorch installation is complete. version: 1.12.1" + +pip install hydra-core omegaconf +pip install freetype-py shapely svgutils +pip install opencv-python scikit-image matplotlib visdom wandb BeautifulSoup4 +pip install triton numba +pip install numpy scipy scikit-fmm einops timm fairscale==0.4.13 +pip install accelerate transformers safetensors datasets + +echo "The basic dependency library is installed." + +pip install easydict scikit-learn pytorch_lightning webdataset +pip install albumentations==0.5.2 +pip install kornia==0.5.0 + +# Noting: you can download the lama model when you need it, +# download LaMa model weights: + +cd lama +curl -O -L https://huggingface.co/xingxm/PyTorch-SVGRender-models/resolve/main/big-lama.zip +unzip big-lama.zip + +echo "LaMa installation is complete." + +pip install ftfy regex tqdm +pip install git+https://github.com/openai/CLIP.git + +echo "CLIP installation is complete." + +pip install diffusers==0.20.2 + +echo "Diffusers installation is complete. version: 0.20.2" + +conda install xformers -c xformers + +echo "xformers installation is complete." + +git clone https://github.com/BachiLi/diffvg.git +cd diffvg +git submodule update --init --recursive +conda install -y -c anaconda cmake +conda install -y -c conda-forge ffmpeg +pip install svgwrite svgpathtools cssutils torch-tools +python setup.py install + +echo "DiffVG installation is complete." + +echo "the running environment has been successfully installed!!!" \ No newline at end of file diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..e90dbf28ddcc3b493532717eca7aed21828c40cb --- /dev/null +++ b/setup.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright (c) XiMing Xing. All rights reserved. 
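# Usage note for the seed utilities in pytorch_svgrender/utils/misc.py above (values illustrative):
# an explicit [start, end] range yields sequential seeds, while None falls back to 1000 seeds
# sampled at random from [1, 1000000).
from pytorch_svgrender.utils import get_seed_range

print(get_seed_range([100, 105]))  # sequential: [100, 101, 102, 103, 104]
print(len(get_seed_range(None)))   # 1000 randomly sampled seeds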
+# Author: XiMing Xing + +from setuptools import setup, find_packages + +setup( + name='PyTorch-SVGRender', + packages=find_packages(exclude=["test*", "docs", "examples"]), + version='1.0.0', + license='Mozilla Public License Version 2.0', + description='SVG Differentiable Rendering: Generating vector graphics using neural networks.', + author='XiMing Xing, Juncheng Hu et al.', + author_email='ximingxing@gmail.com', + url='https://github.com/ximinng/PyTorch-SVGRender', + long_description_content_type='text/markdown', + keywords=[ + 'artificial intelligence', + 'AIGC', + 'generative models', + 'SVG generation', + ], + install_requires=[ + 'hydra-core==1.3.2', # configuration processor + 'omegaconf==2.3.0', # YAML processor + 'accelerate==0.20.3', # Hugging Face - pytorch distributed configuration + 'diffusers==0.20.2', # Hugging Face - diffusion models + 'transformers==4.30.2', # Hugging Face - transformers + 'datasets==2.13.1', + 'safetensors==0.3.1', + 'xformers', + 'einops==0.6.1', + 'pillow', + 'imageio-ffmpeg==0.4.8', + 'torch>=1.13.1', + 'torchvision>=0.14.1', + 'tensorboard==2.14.0', + 'triton==2.0.0.post1', + 'numba==0.57.1', + 'tqdm', # progress bar + 'ftfy==6.1.1', + 'regex==2023.6.3', + 'timm==0.6.13', # computer vision models + "numpy==1.24.4", + 'scikit-learn==1.3.2', + 'scikit-fmm==2023.4.2', + 'scipy==1.10.1', + 'scikit-image==0.20.0', + 'Pillow', # keep the PIL.Image.Resampling deprecation away + 'pytorch-lightning==2.1.0', + 'matplotlib==3.7.1', + 'visdom==0.2.4', + 'wandb==0.15.8', # Weights & Biases + 'opencv-python==4.8.0.74', # cv2 + 'BeautifulSoup4==4.12.2', + 'freetype-py', # font + 'shapely==2.0.1', # SVG + 'svgwrite==1.4.3', + 'svgutils==0.3.4', + 'svgpathtools==1.6.1', + 'fairscale==0.4.13' # ImageReward + ], + dependency_links=[ + "clip @ git+https://github.com/openai/CLIP.git", + ], + classifiers=[ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python :: 3.10', + ], +) diff --git a/svg_render.py b/svg_render.py new file mode 100644 index 0000000000000000000000000000000000000000..81caa7628fba21c0681a054a34bf0f608caed342 --- /dev/null +++ b/svg_render.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# Author: ximing xing +# Description: the main entry point of this project. +# Copyright (c) 2023, XiMing Xing. + +import os +import sys +from functools import partial + +from accelerate.utils import set_seed +import hydra +import omegaconf + +sys.path.append(os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]) + +from pytorch_svgrender.utils import render_batch_wrap, get_seed_range + +METHODS = [ + 'diffvg', + 'live', + 'vectorfusion', + 'clipasso', + 'clipascene', + 'diffsketcher', + 'stylediffsketcher', + 'clipdraw', + 'styleclipdraw', + 'wordasimage', + 'clipfont', + 'svgdreamer' +] + + +@hydra.main(version_base=None, config_path="conf", config_name='config') +def main(cfg: omegaconf.DictConfig): + # print(omegaconf.OmegaConf.to_yaml(cfg)) + flag = cfg.x.method + assert flag in METHODS, f"{flag} is not currently supported!"
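# Rough shape of the composed Hydra config that main() expects. conf/config.yaml is not part of
# this excerpt, so every key and value below is an assumption inferred from how cfg is used.
from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "seed": 42,
    "multirun": False,                # when True, a batch of seeds is rendered via render_batch_wrap
    "srange": None,                   # optional [start, end] seed range for multirun
    "prompt": "a cat sitting",        # consumed by the text-driven pipelines
    "target": "./data/horse.png",     # consumed by the image-driven pipelines (path is illustrative)
    "x": {"method": "diffsketcher"},  # selects one of the METHODS branches below
})
assert cfg.x.method in ("diffvg", "live", "diffsketcher", "svgdreamer")  # subset of METHODS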
+ + # seed prepare + set_seed(cfg.seed) + seed_range = get_seed_range(cfg.srange) if cfg.multirun else None + + # render function + render_batch_fn = partial(render_batch_wrap, cfg=cfg, seed_range=seed_range) + + if flag == "diffvg": # img2svg + from pytorch_svgrender.pipelines.DiffVG_pipeline import DiffVGPipeline + + pipe = DiffVGPipeline(cfg) + pipe.painterly_rendering(cfg.target) + + elif flag == "live": # img2svg + from pytorch_svgrender.pipelines.LIVE_pipeline import LIVEPipeline + + pipe = LIVEPipeline(cfg) + pipe.painterly_rendering(cfg.target) + + elif flag == "vectorfusion": # text2svg + from pytorch_svgrender.pipelines.VectorFusion_pipeline import VectorFusionPipeline + + if not cfg.multirun: + pipe = VectorFusionPipeline(cfg) + pipe.painterly_rendering(cfg.prompt) + else: # generate many SVG at once + render_batch_fn(pipeline=VectorFusionPipeline, text_prompt=cfg.prompt) + + elif flag == "svgdreamer": # text2svg + from pytorch_svgrender.pipelines.SVGDreamer_pipeline import SVGDreamerPipeline + + if not cfg.multirun: + pipe = SVGDreamerPipeline(cfg) + pipe.painterly_rendering(cfg.prompt) + else: # generate many SVG at once + render_batch_fn(pipeline=SVGDreamerPipeline, text_prompt=cfg.prompt, target_file=None) + + elif flag == "wordasimage": # text2font + from pytorch_svgrender.pipelines.WordAsImage_pipeline import WordAsImagePipeline + + pipe = WordAsImagePipeline(cfg) + pipe.painterly_rendering(cfg.x.word, cfg.prompt, cfg.x.optim_letter) + + elif flag == "clipasso": # img2sketch + from pytorch_svgrender.pipelines.CLIPasso_pipeline import CLIPassoPipeline + + pipe = CLIPassoPipeline(cfg) + pipe.painterly_rendering(cfg.target) + + elif flag == 'clipascene': + from pytorch_svgrender.pipelines.CLIPascene_pipeline import CLIPascenePipeline + + pipe = CLIPascenePipeline(cfg) + pipe.painterly_rendering(cfg.target) + + elif flag == "clipdraw": # text2svg + from pytorch_svgrender.pipelines.CLIPDraw_pipeline import CLIPDrawPipeline + + pipe = CLIPDrawPipeline(cfg) + pipe.painterly_rendering(cfg.prompt) + + elif flag == "clipfont": # text and font to font + from pytorch_svgrender.pipelines.CLIPFont_pipeline import CLIPFontPipeline + + if not cfg.multirun: + pipe = CLIPFontPipeline(cfg) + pipe.painterly_rendering(svg_path=cfg.target, prompt=cfg.prompt) + else: # generate many SVG at once + render_batch_fn(pipeline=CLIPFontPipeline, svg_path=cfg.target, prompt=cfg.prompt) + + elif flag == "styleclipdraw": # text to stylized svg + from pytorch_svgrender.pipelines.StyleCLIPDraw_pipeline import StyleCLIPDrawPipeline + + pipe = StyleCLIPDrawPipeline(cfg) + pipe.painterly_rendering(cfg.prompt, style_fpath=cfg.target) + + elif flag == "diffsketcher": # text2sketch + from pytorch_svgrender.pipelines.DiffSketcher_pipeline import DiffSketcherPipeline + + if not cfg.multirun: + pipe = DiffSketcherPipeline(cfg) + pipe.painterly_rendering(cfg.prompt) + else: # generate many SVG at once + render_batch_fn(pipeline=DiffSketcherPipeline, prompt=cfg.prompt) + + elif flag == "stylediffsketcher": # text2sketch + style transfer + from pytorch_svgrender.pipelines.DiffSketcher_stylized_pipeline import StylizedDiffSketcherPipeline + + if not cfg.multirun: + pipe = StylizedDiffSketcherPipeline(cfg) + pipe.painterly_rendering(cfg.prompt, style_fpath=cfg.target) + else: # generate many SVG at once + render_batch_fn(pipeline=StylizedDiffSketcherPipeline, prompt=cfg.prompt, style_fpath=cfg.style_file) + + +if __name__ == '__main__': + main() diff --git a/test/test_stable_diffusion.py b/test/test_stable_diffusion.py 
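In the multirun branches above, render_batch_wrap (pytorch_svgrender/utils/misc.py) re-instantiates the chosen pipeline once per seed and forwards the remaining keyword arguments to painterly_rendering. A self-contained sketch of that calling convention, with a dummy pipeline standing in for the real ones (illustrative only):

from omegaconf import OmegaConf
from pytorch_svgrender.utils import render_batch_wrap

class DummyPipeline:
    # stands in for e.g. VectorFusionPipeline; only the interface matters here
    def __init__(self, cfg):
        self.cfg = cfg

    def painterly_rendering(self, text_prompt):
        print(f"seed={self.cfg.seed}: rendering '{text_prompt}'")

cfg = OmegaConf.create({"seed": 0})
render_batch_wrap(cfg, seed_range=[1, 2, 3], pipeline=DummyPipeline, text_prompt="a cat")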
new file mode 100644 index 0000000000000000000000000000000000000000..ae73740e8902c60497be4dfb9f8da33bd055a20f --- /dev/null +++ b/test/test_stable_diffusion.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Author: ximing +# Description: test_sd_models +# Copyright (c) 2024, XiMing Xing. +# License: MPL-2.0 License + +import random +from pathlib import Path +from diffusers.utils import load_image, make_image_grid +from accelerate.utils import set_seed + + +def test_SDXL(): + from diffusers import AutoPipelineForText2Image, StableDiffusionXLImg2ImgPipeline + import torch + + set_seed(seed=random.randint(0, 9999999)) + + pipeline_text2image = AutoPipelineForText2Image.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + torch_dtype=torch.float16, + variant="fp16", + use_safetensors=True, + local_files_only=True, + ).to("cuda") + pipeline_text2image.enable_xformers_memory_efficient_attention() + + refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-refiner-1.0", + torch_dtype=torch.float16, + use_safetensors=True, + variant="fp16", + local_files_only=True, + ).to("cuda") + refiner.enable_xformers_memory_efficient_attention() + + # prompt = "A variety of vector graphics. vector art." + # prompt = "unicorn, Die-cut sticker, Cute kawaii flower character sticker, white background, illustration minimalism, vector, pastel colors" + prompt = "DigiArtist holds a shiny SVG paintbrush, Die-cut sticker, Cute kawaii character sticker, 3d blender render, white background, illustration minimalism, vector, pastel colors, physically based rendering" + # prompt = "the batman, Die-cut sticker, Cute kawaii character sticker, white background, illustration minimalism, vector, pastel colors" + + save_path = Path("./test/sdxl-DigiArtist-3") + save_path.mkdir(parents=True, exist_ok=True) + + for i in range(10): + image = pipeline_text2image(prompt=prompt).images[0] + + refined_image = refiner( + prompt=prompt, + num_inference_steps=60, + denoising_start=0.8, + image=image, + ).images[0] + + img = make_image_grid([image], rows=1, cols=1) + img.save(save_path / f'base_{i}.png') + img = make_image_grid([refined_image], rows=1, cols=1) + img.save(save_path / f'refined_{i}.png') + + +if __name__ == '__main__': + # python test/test_stable_diffusion.py + test_SDXL()
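The test above decodes the base SDXL output to a full image and then refines it as img2img with denoising_start=0.8. diffusers also documents a base/refiner split in which the base hands latents directly to the refiner; a sketch of that variant, reusing the same two checkpoints (prompt shortened for brevity):

import torch
from diffusers import AutoPipelineForText2Image, StableDiffusionXLImg2ImgPipeline

base = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16, variant="fp16", use_safetensors=True,
).to("cuda")
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    torch_dtype=torch.float16, variant="fp16", use_safetensors=True,
).to("cuda")

prompt = "DigiArtist holds a shiny SVG paintbrush, illustration minimalism, vector, pastel colors"
# the base model handles the first 80% of the denoising steps and returns latents ...
latents = base(prompt=prompt, num_inference_steps=40, denoising_end=0.8,
               output_type="latent").images
# ... the refiner resumes at the same point and decodes the final image
image = refiner(prompt=prompt, num_inference_steps=40, denoising_start=0.8,
                image=latents).images[0]
image.save("refined.png")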