#!/usr/bin/env python
# coding: utf-8
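"""Batch inference for CoMoGAN.

Translates every image found in --load_path across a sweep of sun-angle values
phi in [0, 2*pi) and writes one PNG per (image, phi) pair into --save_path.

Example invocation (a sketch; the script name, paths and checkpoint file are
placeholders, following the logs/<RUN_ID>/tensorboard/default/version_0/checkpoints
layout assumed further below):

    python <this_script>.py --load_path /path/to/Day/ --save_path /path/to/out/ --checkpoint logs/<RUN_ID>/tensorboard/default/version_0/checkpoints/iter_<N>.pth
"""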

import pathlib
import torch
import yaml
import sys
import os

from math import pi
from PIL import Image
from munch import Munch
from argparse import ArgumentParser as AP
from torchvision.transforms import ToPILImage, ToTensor

# Make the repository root importable when this script is run from the scripts/ directory
p_mod = str(pathlib.Path('.').absolute())
sys.path.append(p_mod.replace("/scripts", ""))

from data.base_dataset import get_transform
from networks import create_model

device = 'cuda' if torch.cuda.is_available() else 'cpu'


def printProgressBar(i, total, postText):
    # Draw a simple in-place console progress bar
    n_bar = 20  # width of the bar in characters
    j = i / total
    sys.stdout.write('\r')
    sys.stdout.write(f"[{'=' * int(n_bar * j):{n_bar}s}] {int(100 * j)}%  {postText}")
    sys.stdout.flush()

def inference(model, opt, A_path, phi):
    # phi may be a Python float or a 0-dim tensor (e.g. an element of torch.arange)
    t_phi = torch.as_tensor(phi)
    A_img = Image.open(A_path).convert('RGB')
    A = get_transform(opt, convert=False)(A_img)
    # Scale the input from [0, 1] to [-1, 1] and add a batch dimension
    img_real = (ToTensor()(A) * 2 - 1).unsqueeze(0)
    img_fake = model.forward(img_real.to(device), t_phi.to(device))

    # Map the output back from [-1, 1] to [0, 1] and convert it to a PIL image
    return ToPILImage()((img_fake[0].cpu() + 1) / 2)
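
# Example of calling inference() directly for a single image at a fixed sun angle
# (the input path and output filename below are illustrative, not shipped with the repo):
#
#     out_img = inference(model, opt, '/path/to/Day/example.png', pi / 2)
#     out_img.save('example_phi_1.6.png')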

def main(cmdline):
    if cmdline.checkpoint is None:
        # Collect the names of the run directories inside ./logs
        p = pathlib.Path('./logs')
        list_run_id = [x.name for x in p.iterdir() if x.is_dir()]

        RUN_ID = list_run_id[0]
        root_dir = os.path.join('logs', RUN_ID, 'tensorboard', 'default', 'version_0')
        p = pathlib.Path(root_dir + '/checkpoints')
        # List the available checkpoints and pick the most recent one (highest iteration)
        list_checkpoint = [x.name for x in p.iterdir() if 'iter' in x.name]
        list_checkpoint.sort(reverse=True, key=lambda x: int(x.split('_')[1].split('.pth')[0]))

        CHECKPOINT = list_checkpoint[0]
    else:
        RUN_ID = os.path.basename(cmdline.checkpoint.split("/tensorboard")[0])
        # The version directory (containing hparams.yaml and checkpoints/) is everything before "/checkpoints"
        root_dir = cmdline.checkpoint.split("/checkpoints")[0]
        CHECKPOINT = os.path.basename(cmdline.checkpoint.split('checkpoints/')[1])

    print(f"Load checkpoint {CHECKPOINT} from {RUN_ID}")

    # Load the training hyper-parameters saved alongside the checkpoint
    with open(os.path.join(root_dir, 'hparams.yaml')) as cfg_file:
        opt = Munch(yaml.safe_load(cfg_file))

    # Disable random flipping so inference is deterministic
    opt.no_flip = True
    # Build the model from the hyper-parameters, then restore the checkpoint weights
    model = create_model(opt)
    model = model.load_from_checkpoint(os.path.join(root_dir, 'checkpoints', CHECKPOINT))
    # Move the model to the selected device (GPU if available, otherwise CPU)
    model.to(device)

    # Collect the paths of all files inside the load_path directory
    p = pathlib.Path(cmdline.load_path)
    dataset_paths = [str(x.relative_to(cmdline.load_path)) for x in p.iterdir()]
    dataset_paths.sort()

    # Keep only the files whose name contains the given string
    if cmdline.sequence is not None:
        sequence_name = [f for f in dataset_paths if cmdline.sequence in f]
    else:
        sequence_name = dataset_paths

    # Create directory if it doesn't exist
    os.makedirs(cmdline.save_path, exist_ok=True)

    for i, path_img in enumerate(sequence_name):
        printProgressBar(i, len(sequence_name), path_img)
        # Sweep phi from 0 to 2*pi (exclusive) in increments of 0.2
        for phi in torch.arange(0, 2 * pi, 0.2):
            # Run the image through the model conditioned on the current phi
            out_img = inference(model, opt, os.path.join(cmdline.load_path, path_img), phi)
            # Save the generated image with phi encoded in the filename
            save_path = os.path.join(cmdline.save_path, f"{os.path.splitext(os.path.basename(path_img))[0]}_phi_{phi:.1f}.png")
            out_img.save(save_path)

if __name__ == '__main__':
    ap = AP()
    ap.add_argument('--load_path', default='/datasets/waymo_comogan/val/sunny/Day/', type=str, help='Path to the dataset to translate')
    ap.add_argument('--save_path', default='/CoMoGan/images/', type=str, help='Path where the translated images are saved')
    ap.add_argument('--sequence', default=None, type=str, help='Only translate images whose filename contains this string')
    ap.add_argument('--checkpoint', default=None, type=str, help='Path to the checkpoint to use; if omitted, the latest checkpoint of a run found under ./logs is used')
    ap.add_argument('--phi', default=0.0, type=float, help='Angle of the sun 𝜙 in [0, 2𝜋], which maps to a sun elevation in [+30°, −40°]')
    main(ap.parse_args())
    print("\n")