LiuhanChen committed
Commit 97c92c0 · verified · 1 Parent(s): f2b657e

Add files using upload-large-folder tool

Files changed (50)
  1. =0.19.0 +0 -0
  2. =1.38.0 +0 -0
  3. causalvideovae/dataset/ucf101.py +80 -0
  4. causalvideovae/eval/RAFT/.gitignore +8 -0
  5. causalvideovae/eval/RAFT/alt_cuda_corr/correlation.cpp +54 -0
  6. causalvideovae/eval/RAFT/alt_cuda_corr/setup.py +15 -0
  7. causalvideovae/eval/RAFT/core/raft.py +144 -0
  8. causalvideovae/eval/RAFT/core/update.py +139 -0
  9. causalvideovae/eval/RAFT/core/utils/__init__.py +0 -0
  10. causalvideovae/eval/RAFT/core/utils/augmentor.py +246 -0
  11. causalvideovae/eval/RAFT/core/utils/flow_viz.py +132 -0
  12. causalvideovae/eval/RAFT/evaluate.py +197 -0
  13. causalvideovae/eval/RAFT/train.py +247 -0
  14. causalvideovae/eval/RAFT/train_standard.sh +6 -0
  15. causalvideovae/eval/cal_flolpips.py +83 -0
  16. causalvideovae/eval/cal_fvd.py +85 -0
  17. causalvideovae/eval/cal_lpips.py +97 -0
  18. causalvideovae/eval/cal_psnr.py +97 -0
  19. causalvideovae/eval/cal_we.ipynb +0 -0
  20. causalvideovae/eval/eval_clip_score.py +225 -0
  21. causalvideovae/eval/eval_common_metric.py +228 -0
  22. causalvideovae/eval/flolpips/pwcnet.py +344 -0
  23. causalvideovae/eval/flolpips/utils.py +95 -0
  24. causalvideovae/eval/fvd/styleganv/fvd.py +90 -0
  25. causalvideovae/eval/fvd/videogpt/fvd.py +137 -0
  26. causalvideovae/eval/script/cal_lpips.sh +8 -0
  27. causalvideovae/model/losses/perceptual_loss.py +522 -0
  28. causalvideovae/model/modules/attention.py +287 -0
  29. causalvideovae/model/modules/normalize.py +104 -0
  30. causalvideovae/model/modules/ops.py +40 -0
  31. causalvideovae/model/refiner/__init__.py +2 -0
  32. causalvideovae/model/utils/distrib_utils.py +42 -0
  33. causalvideovae/model/utils/module_utils.py +17 -0
  34. causalvideovae/utils/dataset_utils.py +160 -0
  35. causalvideovae/utils/utils.py +627 -0
  36. scripts/CogVideovae_gen_video.sh +39 -0
  37. scripts/causalvideovae_gen_video.sh +42 -0
  38. scripts/easyanimate_gen_video.sh +40 -0
  39. scripts/rec_CogVideo_vae.py +315 -0
  40. scripts/rec_easyanimate_vae.py +321 -0
  41. scripts/rec_vqgan_vae.py +318 -0
  42. scripts/refine_video.sh +21 -0
  43. scripts/sd2_1_gen_video.sh +30 -0
  44. scripts/svd_gen_video.sh +30 -0
  45. scripts/tats_gen_video.sh +30 -0
  46. scripts/train_ddp.sh +42 -0
  47. scripts/train_ddp_refiner.sh +41 -0
  48. scripts/vae_demo.sh +15 -0
  49. test.py +2 -0
  50. train_ddp.py +586 -0
=0.19.0 ADDED
File without changes
=1.38.0 ADDED
File without changes
causalvideovae/dataset/ucf101.py ADDED
@@ -0,0 +1,80 @@
+ import math
+ import os
+
+ import decord
+ import numpy as np
+ import torch
+ import torchvision
+ from decord import VideoReader, cpu
+ from torch.utils.data import Dataset
+ from torchvision.transforms import Compose, Lambda, ToTensor
+ from torchvision.transforms._transforms_video import NormalizeVideo, RandomCropVideo, RandomHorizontalFlipVideo
+ from pytorchvideo.transforms import ApplyTransformToKey, ShortSideScale, UniformTemporalSubsample
+ from torch.nn import functional as F
+ import random
+
+ from ..utils.dataset_utils import DecordInit
+
+
+ class UCF101(Dataset):
+     def __init__(self, args, transform, temporal_sample):
+         self.data_path = args.data_path
+         self.num_frames = args.num_frames
+         self.transform = transform
+         self.temporal_sample = temporal_sample
+         self.v_decoder = DecordInit()
+
+         self.classes = sorted(os.listdir(self.data_path))
+         self.class_to_idx = {cls_name: idx for idx, cls_name in enumerate(self.classes)}
+         self.samples = self._make_dataset()
+
+     def _make_dataset(self):
+         dataset = []
+         for class_name in self.classes:
+             class_path = os.path.join(self.data_path, class_name)
+             for fname in os.listdir(class_path):
+                 if fname.endswith('.avi'):
+                     item = (os.path.join(class_path, fname), self.class_to_idx[class_name])
+                     dataset.append(item)
+         return dataset
+
+     def __len__(self):
+         return len(self.samples)
+
+     def __getitem__(self, idx):
+         video_path, label = self.samples[idx]
+         try:
+             video = self.tv_read(video_path)
+             video = self.transform(video)  # T C H W -> T C H W
+             video = video.transpose(0, 1)  # T C H W -> C T H W
+             return video, label
+         except Exception as e:
+             print(f'Error with {e}, {video_path}')
+             return self.__getitem__(random.randint(0, self.__len__() - 1))
+
+     def tv_read(self, path):
+         vframes, aframes, info = torchvision.io.read_video(filename=path, pts_unit='sec', output_format='TCHW')
+         total_frames = len(vframes)
+
+         # Sampling video frames
+         start_frame_ind, end_frame_ind = self.temporal_sample(total_frames)
+         # assert end_frame_ind - start_frame_ind >= self.num_frames
+         frame_indice = np.linspace(start_frame_ind, end_frame_ind - 1, self.num_frames, dtype=int)
+         video = vframes[frame_indice]  # (T, C, H, W)
+
+         return video
+
+     def decord_read(self, path):
+         decord_vr = self.v_decoder(path)
+         total_frames = len(decord_vr)
+         # Sampling video frames
+         start_frame_ind, end_frame_ind = self.temporal_sample(total_frames)
+         # assert end_frame_ind - start_frame_ind >= self.num_frames
+         frame_indice = np.linspace(start_frame_ind, end_frame_ind - 1, self.num_frames, dtype=int)
+
+         video_data = decord_vr.get_batch(frame_indice).asnumpy()
+         video_data = torch.from_numpy(video_data)
+         video_data = video_data.permute(0, 3, 1, 2)  # (T, H, W, C) -> (T, C, H, W)
+         return video_data
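A minimal usage sketch of the dataset above, assuming an args namespace with the data_path/num_frames fields used by the constructor; the identity transform and the random temporal window are hypothetical stand-ins for whatever the training config actually supplies:

    import random
    from argparse import Namespace
    from torch.utils.data import DataLoader

    def random_temporal_window(total_frames, num_frames=17):
        # stand-in for the repo's temporal_sample callable: pick a contiguous frame window
        start = random.randint(0, max(0, total_frames - num_frames))
        return start, min(start + num_frames, total_frames)

    args = Namespace(data_path='/path/to/UCF-101', num_frames=17)
    dataset = UCF101(args, transform=lambda v: v, temporal_sample=random_temporal_window)
    loader = DataLoader(dataset, batch_size=2, shuffle=True, num_workers=4)
    video, label = next(iter(loader))  # video: (B, C, T, H, W); uint8 here since the transform is identity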
causalvideovae/eval/RAFT/.gitignore ADDED
@@ -0,0 +1,8 @@
+ *.pyc
+ *.egg-info
+ dist
+ datasets
+ pytorch_env
+ models
+ build
+ correlation.egg-info
causalvideovae/eval/RAFT/alt_cuda_corr/correlation.cpp ADDED
@@ -0,0 +1,54 @@
+ #include <torch/extension.h>
+ #include <vector>
+
+ // CUDA forward declarations
+ std::vector<torch::Tensor> corr_cuda_forward(
+     torch::Tensor fmap1,
+     torch::Tensor fmap2,
+     torch::Tensor coords,
+     int radius);
+
+ std::vector<torch::Tensor> corr_cuda_backward(
+     torch::Tensor fmap1,
+     torch::Tensor fmap2,
+     torch::Tensor coords,
+     torch::Tensor corr_grad,
+     int radius);
+
+ // C++ interface
+ #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
+ #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
+ #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
+
+ std::vector<torch::Tensor> corr_forward(
+     torch::Tensor fmap1,
+     torch::Tensor fmap2,
+     torch::Tensor coords,
+     int radius) {
+   CHECK_INPUT(fmap1);
+   CHECK_INPUT(fmap2);
+   CHECK_INPUT(coords);
+
+   return corr_cuda_forward(fmap1, fmap2, coords, radius);
+ }
+
+
+ std::vector<torch::Tensor> corr_backward(
+     torch::Tensor fmap1,
+     torch::Tensor fmap2,
+     torch::Tensor coords,
+     torch::Tensor corr_grad,
+     int radius) {
+   CHECK_INPUT(fmap1);
+   CHECK_INPUT(fmap2);
+   CHECK_INPUT(coords);
+   CHECK_INPUT(corr_grad);
+
+   return corr_cuda_backward(fmap1, fmap2, coords, corr_grad, radius);
+ }
+
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+   m.def("forward", &corr_forward, "CORR forward");
+   m.def("backward", &corr_backward, "CORR backward");
+ }
causalvideovae/eval/RAFT/alt_cuda_corr/setup.py ADDED
@@ -0,0 +1,15 @@
+ from setuptools import setup
+ from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+
+
+ setup(
+     name='correlation',
+     ext_modules=[
+         CUDAExtension('alt_cuda_corr',
+             sources=['correlation.cpp', 'correlation_kernel.cu'],
+             extra_compile_args={'cxx': [], 'nvcc': ['-O3']}),
+     ],
+     cmdclass={
+         'build_ext': BuildExtension
+     })
causalvideovae/eval/RAFT/core/raft.py ADDED
@@ -0,0 +1,144 @@
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from .update import BasicUpdateBlock, SmallUpdateBlock
+ from .extractor import BasicEncoder, SmallEncoder
+ from .corr import CorrBlock, AlternateCorrBlock
+ from .utils.utils import bilinear_sampler, coords_grid, upflow8
+
+ try:
+     autocast = torch.cuda.amp.autocast
+ except:
+     # dummy autocast for PyTorch < 1.6
+     class autocast:
+         def __init__(self, enabled):
+             pass
+         def __enter__(self):
+             pass
+         def __exit__(self, *args):
+             pass
+
+
+ class RAFT(nn.Module):
+     def __init__(self, args):
+         super(RAFT, self).__init__()
+         self.args = args
+
+         if args.small:
+             self.hidden_dim = hdim = 96
+             self.context_dim = cdim = 64
+             args.corr_levels = 4
+             args.corr_radius = 3
+
+         else:
+             self.hidden_dim = hdim = 128
+             self.context_dim = cdim = 128
+             args.corr_levels = 4
+             args.corr_radius = 4
+
+         if 'dropout' not in self.args:
+             self.args.dropout = 0
+
+         if 'alternate_corr' not in self.args:
+             self.args.alternate_corr = False
+
+         # feature network, context network, and update block
+         if args.small:
+             self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
+             self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
+             self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
+
+         else:
+             self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
+             self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
+             self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
+
+     def freeze_bn(self):
+         for m in self.modules():
+             if isinstance(m, nn.BatchNorm2d):
+                 m.eval()
+
+     def initialize_flow(self, img):
+         """ Flow is represented as difference between two coordinate grids: flow = coords1 - coords0 """
+         N, C, H, W = img.shape
+         coords0 = coords_grid(N, H//8, W//8, device=img.device)
+         coords1 = coords_grid(N, H//8, W//8, device=img.device)
+
+         # optical flow computed as difference: flow = coords1 - coords0
+         return coords0, coords1
+
+     def upsample_flow(self, flow, mask):
+         """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
+         N, _, H, W = flow.shape
+         mask = mask.view(N, 1, 9, 8, 8, H, W)
+         mask = torch.softmax(mask, dim=2)
+
+         up_flow = F.unfold(8 * flow, [3,3], padding=1)
+         up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
+
+         up_flow = torch.sum(mask * up_flow, dim=2)
+         up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
+         return up_flow.reshape(N, 2, 8*H, 8*W)
+
+
+     def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
+         """ Estimate optical flow between pair of frames """
+
+         image1 = 2 * (image1 / 255.0) - 1.0
+         image2 = 2 * (image2 / 255.0) - 1.0
+
+         image1 = image1.contiguous()
+         image2 = image2.contiguous()
+
+         hdim = self.hidden_dim
+         cdim = self.context_dim
+
+         # run the feature network
+         with autocast(enabled=self.args.mixed_precision):
+             fmap1, fmap2 = self.fnet([image1, image2])
+
+         fmap1 = fmap1.float()
+         fmap2 = fmap2.float()
+         if self.args.alternate_corr:
+             corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
+         else:
+             corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
+
+         # run the context network
+         with autocast(enabled=self.args.mixed_precision):
+             cnet = self.cnet(image1)
+             net, inp = torch.split(cnet, [hdim, cdim], dim=1)
+             net = torch.tanh(net)
+             inp = torch.relu(inp)
+
+         coords0, coords1 = self.initialize_flow(image1)
+
+         if flow_init is not None:
+             coords1 = coords1 + flow_init
+
+         flow_predictions = []
+         for itr in range(iters):
+             coords1 = coords1.detach()
+             corr = corr_fn(coords1)  # index correlation volume
+
+             flow = coords1 - coords0
+             with autocast(enabled=self.args.mixed_precision):
+                 net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
+
+             # F(t+1) = F(t) + \Delta(t)
+             coords1 = coords1 + delta_flow
+
+             # upsample predictions
+             if up_mask is None:
+                 flow_up = upflow8(coords1 - coords0)
+             else:
+                 flow_up = self.upsample_flow(coords1 - coords0, up_mask)
+
+             flow_predictions.append(flow_up)
+
+         if test_mode:
+             return coords1 - coords0, flow_up
+
+         return flow_predictions
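A minimal inference sketch for the RAFT module above, assuming a downloaded checkpoint (the path is a placeholder) and images whose height and width are multiples of 8; otherwise wrap them with the InputPadder used in evaluate.py later in this diff:

    import torch
    from argparse import Namespace

    # the three flags mirror the CLI options parsed in evaluate.py / train.py below
    args = Namespace(small=False, mixed_precision=False, alternate_corr=False)
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load('checkpoints/raft-sintel.pth'))
    model = model.module.cuda().eval()

    with torch.no_grad():
        # RAFT expects float images in [0, 255]
        image1 = torch.randint(0, 256, (1, 3, 448, 1024)).float().cuda()
        image2 = torch.randint(0, 256, (1, 3, 448, 1024)).float().cuda()
        flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)  # flow_up: (1, 2, 448, 1024)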
causalvideovae/eval/RAFT/core/update.py ADDED
@@ -0,0 +1,139 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ class FlowHead(nn.Module):
+     def __init__(self, input_dim=128, hidden_dim=256):
+         super(FlowHead, self).__init__()
+         self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
+         self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
+         self.relu = nn.ReLU(inplace=True)
+
+     def forward(self, x):
+         return self.conv2(self.relu(self.conv1(x)))
+
+ class ConvGRU(nn.Module):
+     def __init__(self, hidden_dim=128, input_dim=192+128):
+         super(ConvGRU, self).__init__()
+         self.convz = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
+         self.convr = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
+         self.convq = nn.Conv2d(hidden_dim+input_dim, hidden_dim, 3, padding=1)
+
+     def forward(self, h, x):
+         hx = torch.cat([h, x], dim=1)
+
+         z = torch.sigmoid(self.convz(hx))
+         r = torch.sigmoid(self.convr(hx))
+         q = torch.tanh(self.convq(torch.cat([r*h, x], dim=1)))
+
+         h = (1-z) * h + z * q
+         return h
+
+ class SepConvGRU(nn.Module):
+     def __init__(self, hidden_dim=128, input_dim=192+128):
+         super(SepConvGRU, self).__init__()
+         self.convz1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
+         self.convr1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
+         self.convq1 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (1,5), padding=(0,2))
+
+         self.convz2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
+         self.convr2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
+         self.convq2 = nn.Conv2d(hidden_dim+input_dim, hidden_dim, (5,1), padding=(2,0))
+
+
+     def forward(self, h, x):
+         # horizontal
+         hx = torch.cat([h, x], dim=1)
+         z = torch.sigmoid(self.convz1(hx))
+         r = torch.sigmoid(self.convr1(hx))
+         q = torch.tanh(self.convq1(torch.cat([r*h, x], dim=1)))
+         h = (1-z) * h + z * q
+
+         # vertical
+         hx = torch.cat([h, x], dim=1)
+         z = torch.sigmoid(self.convz2(hx))
+         r = torch.sigmoid(self.convr2(hx))
+         q = torch.tanh(self.convq2(torch.cat([r*h, x], dim=1)))
+         h = (1-z) * h + z * q
+
+         return h
+
+ class SmallMotionEncoder(nn.Module):
+     def __init__(self, args):
+         super(SmallMotionEncoder, self).__init__()
+         cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
+         self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0)
+         self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
+         self.convf2 = nn.Conv2d(64, 32, 3, padding=1)
+         self.conv = nn.Conv2d(128, 80, 3, padding=1)
+
+     def forward(self, flow, corr):
+         cor = F.relu(self.convc1(corr))
+         flo = F.relu(self.convf1(flow))
+         flo = F.relu(self.convf2(flo))
+         cor_flo = torch.cat([cor, flo], dim=1)
+         out = F.relu(self.conv(cor_flo))
+         return torch.cat([out, flow], dim=1)
+
+ class BasicMotionEncoder(nn.Module):
+     def __init__(self, args):
+         super(BasicMotionEncoder, self).__init__()
+         cor_planes = args.corr_levels * (2*args.corr_radius + 1)**2
+         self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
+         self.convc2 = nn.Conv2d(256, 192, 3, padding=1)
+         self.convf1 = nn.Conv2d(2, 128, 7, padding=3)
+         self.convf2 = nn.Conv2d(128, 64, 3, padding=1)
+         self.conv = nn.Conv2d(64+192, 128-2, 3, padding=1)
+
+     def forward(self, flow, corr):
+         cor = F.relu(self.convc1(corr))
+         cor = F.relu(self.convc2(cor))
+         flo = F.relu(self.convf1(flow))
+         flo = F.relu(self.convf2(flo))
+
+         cor_flo = torch.cat([cor, flo], dim=1)
+         out = F.relu(self.conv(cor_flo))
+         return torch.cat([out, flow], dim=1)
+
+ class SmallUpdateBlock(nn.Module):
+     def __init__(self, args, hidden_dim=96):
+         super(SmallUpdateBlock, self).__init__()
+         self.encoder = SmallMotionEncoder(args)
+         self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82+64)
+         self.flow_head = FlowHead(hidden_dim, hidden_dim=128)
+
+     def forward(self, net, inp, corr, flow):
+         motion_features = self.encoder(flow, corr)
+         inp = torch.cat([inp, motion_features], dim=1)
+         net = self.gru(net, inp)
+         delta_flow = self.flow_head(net)
+
+         return net, None, delta_flow
+
+ class BasicUpdateBlock(nn.Module):
+     def __init__(self, args, hidden_dim=128, input_dim=128):
+         super(BasicUpdateBlock, self).__init__()
+         self.args = args
+         self.encoder = BasicMotionEncoder(args)
+         self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128+hidden_dim)
+         self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
+
+         self.mask = nn.Sequential(
+             nn.Conv2d(128, 256, 3, padding=1),
+             nn.ReLU(inplace=True),
+             nn.Conv2d(256, 64*9, 1, padding=0))
+
+     def forward(self, net, inp, corr, flow, upsample=True):
+         motion_features = self.encoder(flow, corr)
+         inp = torch.cat([inp, motion_features], dim=1)
+
+         net = self.gru(net, inp)
+         delta_flow = self.flow_head(net)
+
+         # scale mask to balance gradients
+         mask = .25 * self.mask(net)
+         return net, mask, delta_flow
causalvideovae/eval/RAFT/core/utils/__init__.py ADDED
File without changes
causalvideovae/eval/RAFT/core/utils/augmentor.py ADDED
@@ -0,0 +1,246 @@
+ import numpy as np
+ import random
+ import math
+ from PIL import Image
+
+ import cv2
+ cv2.setNumThreads(0)
+ cv2.ocl.setUseOpenCL(False)
+
+ import torch
+ from torchvision.transforms import ColorJitter
+ import torch.nn.functional as F
+
+
+ class FlowAugmentor:
+     def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=True):
+
+         # spatial augmentation params
+         self.crop_size = crop_size
+         self.min_scale = min_scale
+         self.max_scale = max_scale
+         self.spatial_aug_prob = 0.8
+         self.stretch_prob = 0.8
+         self.max_stretch = 0.2
+
+         # flip augmentation params
+         self.do_flip = do_flip
+         self.h_flip_prob = 0.5
+         self.v_flip_prob = 0.1
+
+         # photometric augmentation params
+         self.photo_aug = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.5/3.14)
+         self.asymmetric_color_aug_prob = 0.2
+         self.eraser_aug_prob = 0.5
+
+     def color_transform(self, img1, img2):
+         """ Photometric augmentation """
+
+         # asymmetric
+         if np.random.rand() < self.asymmetric_color_aug_prob:
+             img1 = np.array(self.photo_aug(Image.fromarray(img1)), dtype=np.uint8)
+             img2 = np.array(self.photo_aug(Image.fromarray(img2)), dtype=np.uint8)
+
+         # symmetric
+         else:
+             image_stack = np.concatenate([img1, img2], axis=0)
+             image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
+             img1, img2 = np.split(image_stack, 2, axis=0)
+
+         return img1, img2
+
+     def eraser_transform(self, img1, img2, bounds=[50, 100]):
+         """ Occlusion augmentation """
+
+         ht, wd = img1.shape[:2]
+         if np.random.rand() < self.eraser_aug_prob:
+             mean_color = np.mean(img2.reshape(-1, 3), axis=0)
+             for _ in range(np.random.randint(1, 3)):
+                 x0 = np.random.randint(0, wd)
+                 y0 = np.random.randint(0, ht)
+                 dx = np.random.randint(bounds[0], bounds[1])
+                 dy = np.random.randint(bounds[0], bounds[1])
+                 img2[y0:y0+dy, x0:x0+dx, :] = mean_color
+
+         return img1, img2
+
+     def spatial_transform(self, img1, img2, flow):
+         # randomly sample scale
+         ht, wd = img1.shape[:2]
+         min_scale = np.maximum(
+             (self.crop_size[0] + 8) / float(ht),
+             (self.crop_size[1] + 8) / float(wd))
+
+         scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
+         scale_x = scale
+         scale_y = scale
+         if np.random.rand() < self.stretch_prob:
+             scale_x *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
+             scale_y *= 2 ** np.random.uniform(-self.max_stretch, self.max_stretch)
+
+         scale_x = np.clip(scale_x, min_scale, None)
+         scale_y = np.clip(scale_y, min_scale, None)
+
+         if np.random.rand() < self.spatial_aug_prob:
+             # rescale the images
+             img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
+             img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
+             flow = cv2.resize(flow, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
+             flow = flow * [scale_x, scale_y]
+
+         if self.do_flip:
+             if np.random.rand() < self.h_flip_prob:  # h-flip
+                 img1 = img1[:, ::-1]
+                 img2 = img2[:, ::-1]
+                 flow = flow[:, ::-1] * [-1.0, 1.0]
+
+             if np.random.rand() < self.v_flip_prob:  # v-flip
+                 img1 = img1[::-1, :]
+                 img2 = img2[::-1, :]
+                 flow = flow[::-1, :] * [1.0, -1.0]
+
+         y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0])
+         x0 = np.random.randint(0, img1.shape[1] - self.crop_size[1])
+
+         img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
+         img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
+         flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
+
+         return img1, img2, flow
+
+     def __call__(self, img1, img2, flow):
+         img1, img2 = self.color_transform(img1, img2)
+         img1, img2 = self.eraser_transform(img1, img2)
+         img1, img2, flow = self.spatial_transform(img1, img2, flow)
+
+         img1 = np.ascontiguousarray(img1)
+         img2 = np.ascontiguousarray(img2)
+         flow = np.ascontiguousarray(flow)
+
+         return img1, img2, flow
+
+ class SparseFlowAugmentor:
+     def __init__(self, crop_size, min_scale=-0.2, max_scale=0.5, do_flip=False):
+         # spatial augmentation params
+         self.crop_size = crop_size
+         self.min_scale = min_scale
+         self.max_scale = max_scale
+         self.spatial_aug_prob = 0.8
+         self.stretch_prob = 0.8
+         self.max_stretch = 0.2
+
+         # flip augmentation params
+         self.do_flip = do_flip
+         self.h_flip_prob = 0.5
+         self.v_flip_prob = 0.1
+
+         # photometric augmentation params
+         self.photo_aug = ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.3/3.14)
+         self.asymmetric_color_aug_prob = 0.2
+         self.eraser_aug_prob = 0.5
+
+     def color_transform(self, img1, img2):
+         image_stack = np.concatenate([img1, img2], axis=0)
+         image_stack = np.array(self.photo_aug(Image.fromarray(image_stack)), dtype=np.uint8)
+         img1, img2 = np.split(image_stack, 2, axis=0)
+         return img1, img2
+
+     def eraser_transform(self, img1, img2):
+         ht, wd = img1.shape[:2]
+         if np.random.rand() < self.eraser_aug_prob:
+             mean_color = np.mean(img2.reshape(-1, 3), axis=0)
+             for _ in range(np.random.randint(1, 3)):
+                 x0 = np.random.randint(0, wd)
+                 y0 = np.random.randint(0, ht)
+                 dx = np.random.randint(50, 100)
+                 dy = np.random.randint(50, 100)
+                 img2[y0:y0+dy, x0:x0+dx, :] = mean_color
+
+         return img1, img2
+
+     def resize_sparse_flow_map(self, flow, valid, fx=1.0, fy=1.0):
+         ht, wd = flow.shape[:2]
+         coords = np.meshgrid(np.arange(wd), np.arange(ht))
+         coords = np.stack(coords, axis=-1)
+
+         coords = coords.reshape(-1, 2).astype(np.float32)
+         flow = flow.reshape(-1, 2).astype(np.float32)
+         valid = valid.reshape(-1).astype(np.float32)
+
+         coords0 = coords[valid>=1]
+         flow0 = flow[valid>=1]
+
+         ht1 = int(round(ht * fy))
+         wd1 = int(round(wd * fx))
+
+         coords1 = coords0 * [fx, fy]
+         flow1 = flow0 * [fx, fy]
+
+         xx = np.round(coords1[:,0]).astype(np.int32)
+         yy = np.round(coords1[:,1]).astype(np.int32)
+
+         v = (xx > 0) & (xx < wd1) & (yy > 0) & (yy < ht1)
+         xx = xx[v]
+         yy = yy[v]
+         flow1 = flow1[v]
+
+         flow_img = np.zeros([ht1, wd1, 2], dtype=np.float32)
+         valid_img = np.zeros([ht1, wd1], dtype=np.int32)
+
+         flow_img[yy, xx] = flow1
+         valid_img[yy, xx] = 1
+
+         return flow_img, valid_img
+
+     def spatial_transform(self, img1, img2, flow, valid):
+         # randomly sample scale
+
+         ht, wd = img1.shape[:2]
+         min_scale = np.maximum(
+             (self.crop_size[0] + 1) / float(ht),
+             (self.crop_size[1] + 1) / float(wd))
+
+         scale = 2 ** np.random.uniform(self.min_scale, self.max_scale)
+         scale_x = np.clip(scale, min_scale, None)
+         scale_y = np.clip(scale, min_scale, None)
+
+         if np.random.rand() < self.spatial_aug_prob:
+             # rescale the images
+             img1 = cv2.resize(img1, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
+             img2 = cv2.resize(img2, None, fx=scale_x, fy=scale_y, interpolation=cv2.INTER_LINEAR)
+             flow, valid = self.resize_sparse_flow_map(flow, valid, fx=scale_x, fy=scale_y)
+
+         if self.do_flip:
+             if np.random.rand() < 0.5:  # h-flip
+                 img1 = img1[:, ::-1]
+                 img2 = img2[:, ::-1]
+                 flow = flow[:, ::-1] * [-1.0, 1.0]
+                 valid = valid[:, ::-1]
+
+         margin_y = 20
+         margin_x = 50
+
+         y0 = np.random.randint(0, img1.shape[0] - self.crop_size[0] + margin_y)
+         x0 = np.random.randint(-margin_x, img1.shape[1] - self.crop_size[1] + margin_x)
+
+         y0 = np.clip(y0, 0, img1.shape[0] - self.crop_size[0])
+         x0 = np.clip(x0, 0, img1.shape[1] - self.crop_size[1])
+
+         img1 = img1[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
+         img2 = img2[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
+         flow = flow[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
+         valid = valid[y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
+         return img1, img2, flow, valid
+
+
+     def __call__(self, img1, img2, flow, valid):
+         img1, img2 = self.color_transform(img1, img2)
+         img1, img2 = self.eraser_transform(img1, img2)
+         img1, img2, flow, valid = self.spatial_transform(img1, img2, flow, valid)
+
+         img1 = np.ascontiguousarray(img1)
+         img2 = np.ascontiguousarray(img2)
+         flow = np.ascontiguousarray(flow)
+         valid = np.ascontiguousarray(valid)
+
+         return img1, img2, flow, valid
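A brief sketch of how the dense augmentor above is typically applied to one training triple; the arrays here are synthetic (uint8 images and a float32 flow map):

    import numpy as np
    aug = FlowAugmentor(crop_size=(368, 496))
    img1 = np.random.randint(0, 255, (400, 720, 3), dtype=np.uint8)
    img2 = np.random.randint(0, 255, (400, 720, 3), dtype=np.uint8)
    flow = np.random.randn(400, 720, 2).astype(np.float32)
    img1, img2, flow = aug(img1, img2, flow)  # each output is cropped to 368x496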
causalvideovae/eval/RAFT/core/utils/flow_viz.py ADDED
@@ -0,0 +1,132 @@
+ # Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization
+
+
+ # MIT License
+ #
+ # Copyright (c) 2018 Tom Runia
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to conditions.
+ #
+ # Author: Tom Runia
+ # Date Created: 2018-08-03
+
+ import numpy as np
+
+ def make_colorwheel():
+     """
+     Generates a color wheel for optical flow visualization as presented in:
+         Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
+         URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
+
+     Code follows the original C++ source code of Daniel Scharstein.
+     Code follows the Matlab source code of Deqing Sun.
+
+     Returns:
+         np.ndarray: Color wheel
+     """
+
+     RY = 15
+     YG = 6
+     GC = 4
+     CB = 11
+     BM = 13
+     MR = 6
+
+     ncols = RY + YG + GC + CB + BM + MR
+     colorwheel = np.zeros((ncols, 3))
+     col = 0
+
+     # RY
+     colorwheel[0:RY, 0] = 255
+     colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
+     col = col+RY
+     # YG
+     colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
+     colorwheel[col:col+YG, 1] = 255
+     col = col+YG
+     # GC
+     colorwheel[col:col+GC, 1] = 255
+     colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
+     col = col+GC
+     # CB
+     colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
+     colorwheel[col:col+CB, 2] = 255
+     col = col+CB
+     # BM
+     colorwheel[col:col+BM, 2] = 255
+     colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
+     col = col+BM
+     # MR
+     colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
+     colorwheel[col:col+MR, 0] = 255
+     return colorwheel
+
+
+ def flow_uv_to_colors(u, v, convert_to_bgr=False):
+     """
+     Applies the flow color wheel to (possibly clipped) flow components u and v.
+
+     According to the C++ source code of Daniel Scharstein
+     According to the Matlab source code of Deqing Sun
+
+     Args:
+         u (np.ndarray): Input horizontal flow of shape [H,W]
+         v (np.ndarray): Input vertical flow of shape [H,W]
+         convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
+
+     Returns:
+         np.ndarray: Flow visualization image of shape [H,W,3]
+     """
+     flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
+     colorwheel = make_colorwheel()  # shape [55x3]
+     ncols = colorwheel.shape[0]
+     rad = np.sqrt(np.square(u) + np.square(v))
+     a = np.arctan2(-v, -u)/np.pi
+     fk = (a+1) / 2*(ncols-1)
+     k0 = np.floor(fk).astype(np.int32)
+     k1 = k0 + 1
+     k1[k1 == ncols] = 0
+     f = fk - k0
+     for i in range(colorwheel.shape[1]):
+         tmp = colorwheel[:,i]
+         col0 = tmp[k0] / 255.0
+         col1 = tmp[k1] / 255.0
+         col = (1-f)*col0 + f*col1
+         idx = (rad <= 1)
+         col[idx] = 1 - rad[idx] * (1-col[idx])
+         col[~idx] = col[~idx] * 0.75  # out of range
+         # Note the 2-i => BGR instead of RGB
+         ch_idx = 2-i if convert_to_bgr else i
+         flow_image[:,:,ch_idx] = np.floor(255 * col)
+     return flow_image
+
+
+ def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
+     """
+     Expects a two dimensional flow image of shape [H,W,2].
+
+     Args:
+         flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
+         clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
+         convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
+
+     Returns:
+         np.ndarray: Flow visualization image of shape [H,W,3]
+     """
+     assert flow_uv.ndim == 3, 'input flow must have three dimensions'
+     assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
+     if clip_flow is not None:
+         flow_uv = np.clip(flow_uv, 0, clip_flow)
+     u = flow_uv[:,:,0]
+     v = flow_uv[:,:,1]
+     rad = np.sqrt(np.square(u) + np.square(v))
+     rad_max = np.max(rad)
+     epsilon = 1e-5
+     u = u / (rad_max + epsilon)
+     v = v / (rad_max + epsilon)
+     return flow_uv_to_colors(u, v, convert_to_bgr)
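A quick usage sketch for the helper above (the flow field here is synthetic):

    import numpy as np
    flow = 5.0 * np.random.randn(240, 320, 2).astype(np.float32)  # (u, v) displacements, shape [H, W, 2]
    rgb = flow_to_image(flow)  # uint8 visualization of shape [H, W, 3]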
causalvideovae/eval/RAFT/evaluate.py ADDED
@@ -0,0 +1,197 @@
+ import sys
+ sys.path.append('core')
+
+ from PIL import Image
+ import argparse
+ import os
+ import time
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ import matplotlib.pyplot as plt
+
+ import datasets
+ from utils import flow_viz
+ from utils import frame_utils
+
+ from raft import RAFT
+ from utils.utils import InputPadder, forward_interpolate
+
+
+ @torch.no_grad()
+ def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'):
+     """ Create submission for the Sintel leaderboard """
+     model.eval()
+     for dstype in ['clean', 'final']:
+         test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype)
+
+         flow_prev, sequence_prev = None, None
+         for test_id in range(len(test_dataset)):
+             image1, image2, (sequence, frame) = test_dataset[test_id]
+             if sequence != sequence_prev:
+                 flow_prev = None
+
+             padder = InputPadder(image1.shape)
+             image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
+
+             flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True)
+             flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
+
+             if warm_start:
+                 flow_prev = forward_interpolate(flow_low[0])[None].cuda()
+
+             output_dir = os.path.join(output_path, dstype, sequence)
+             output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame+1))
+
+             if not os.path.exists(output_dir):
+                 os.makedirs(output_dir)
+
+             frame_utils.writeFlow(output_file, flow)
+             sequence_prev = sequence
+
+
+ @torch.no_grad()
+ def create_kitti_submission(model, iters=24, output_path='kitti_submission'):
+     """ Create submission for the KITTI leaderboard """
+     model.eval()
+     test_dataset = datasets.KITTI(split='testing', aug_params=None)
+
+     if not os.path.exists(output_path):
+         os.makedirs(output_path)
+
+     for test_id in range(len(test_dataset)):
+         image1, image2, (frame_id, ) = test_dataset[test_id]
+         padder = InputPadder(image1.shape, mode='kitti')
+         image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
+
+         _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
+         flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
+
+         output_filename = os.path.join(output_path, frame_id)
+         frame_utils.writeFlowKITTI(output_filename, flow)
+
+
+ @torch.no_grad()
+ def validate_chairs(model, iters=24):
+     """ Perform evaluation on the FlyingChairs (test) split """
+     model.eval()
+     epe_list = []
+
+     val_dataset = datasets.FlyingChairs(split='validation')
+     for val_id in range(len(val_dataset)):
+         image1, image2, flow_gt, _ = val_dataset[val_id]
+         image1 = image1[None].cuda()
+         image2 = image2[None].cuda()
+
+         _, flow_pr = model(image1, image2, iters=iters, test_mode=True)
+         epe = torch.sum((flow_pr[0].cpu() - flow_gt)**2, dim=0).sqrt()
+         epe_list.append(epe.view(-1).numpy())
+
+     epe = np.mean(np.concatenate(epe_list))
+     print("Validation Chairs EPE: %f" % epe)
+     return {'chairs': epe}
+
+
+ @torch.no_grad()
+ def validate_sintel(model, iters=32):
+     """ Perform validation using the Sintel (train) split """
+     model.eval()
+     results = {}
+     for dstype in ['clean', 'final']:
+         val_dataset = datasets.MpiSintel(split='training', dstype=dstype)
+         epe_list = []
+
+         for val_id in range(len(val_dataset)):
+             image1, image2, flow_gt, _ = val_dataset[val_id]
+             image1 = image1[None].cuda()
+             image2 = image2[None].cuda()
+
+             padder = InputPadder(image1.shape)
+             image1, image2 = padder.pad(image1, image2)
+
+             flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
+             flow = padder.unpad(flow_pr[0]).cpu()
+
+             epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
+             epe_list.append(epe.view(-1).numpy())
+
+         epe_all = np.concatenate(epe_list)
+         epe = np.mean(epe_all)
+         px1 = np.mean(epe_all<1)
+         px3 = np.mean(epe_all<3)
+         px5 = np.mean(epe_all<5)
+
+         print("Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f" % (dstype, epe, px1, px3, px5))
+         results[dstype] = np.mean(epe_list)
+
+     return results
+
+
+ @torch.no_grad()
+ def validate_kitti(model, iters=24):
+     """ Perform validation using the KITTI-2015 (train) split """
+     model.eval()
+     val_dataset = datasets.KITTI(split='training')
+
+     out_list, epe_list = [], []
+     for val_id in range(len(val_dataset)):
+         image1, image2, flow_gt, valid_gt = val_dataset[val_id]
+         image1 = image1[None].cuda()
+         image2 = image2[None].cuda()
+
+         padder = InputPadder(image1.shape, mode='kitti')
+         image1, image2 = padder.pad(image1, image2)
+
+         flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)
+         flow = padder.unpad(flow_pr[0]).cpu()
+
+         epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()
+         mag = torch.sum(flow_gt**2, dim=0).sqrt()
+
+         epe = epe.view(-1)
+         mag = mag.view(-1)
+         val = valid_gt.view(-1) >= 0.5
+
+         out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()
+         epe_list.append(epe[val].mean().item())
+         out_list.append(out[val].cpu().numpy())
+
+     epe_list = np.array(epe_list)
+     out_list = np.concatenate(out_list)
+
+     epe = np.mean(epe_list)
+     f1 = 100 * np.mean(out_list)
+
+     print("Validation KITTI: %f, %f" % (epe, f1))
+     return {'kitti-epe': epe, 'kitti-f1': f1}
+
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--model', help="restore checkpoint")
+     parser.add_argument('--dataset', help="dataset for evaluation")
+     parser.add_argument('--small', action='store_true', help='use small model')
+     parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
+     parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
+     args = parser.parse_args()
+
+     model = torch.nn.DataParallel(RAFT(args))
+     model.load_state_dict(torch.load(args.model))
+
+     model.cuda()
+     model.eval()
+
+     # create_sintel_submission(model.module, warm_start=True)
+     # create_kitti_submission(model.module)
+
+     with torch.no_grad():
+         if args.dataset == 'chairs':
+             validate_chairs(model.module)
+
+         elif args.dataset == 'sintel':
+             validate_sintel(model.module)
+
+         elif args.dataset == 'kitti':
+             validate_kitti(model.module)
causalvideovae/eval/RAFT/train.py ADDED
@@ -0,0 +1,247 @@
+ from __future__ import print_function, division
+ import sys
+ sys.path.append('core')
+
+ import argparse
+ import os
+ import cv2
+ import time
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ import torch
+ import torch.nn as nn
+ import torch.optim as optim
+ import torch.nn.functional as F
+
+ from torch.utils.data import DataLoader
+ from raft import RAFT
+ import evaluate
+ import datasets
+
+ from torch.utils.tensorboard import SummaryWriter
+
+ try:
+     from torch.cuda.amp import GradScaler
+ except:
+     # dummy GradScaler for PyTorch < 1.6 (accepts the same `enabled` flag as the real one)
+     class GradScaler:
+         def __init__(self, enabled=False):
+             pass
+         def scale(self, loss):
+             return loss
+         def unscale_(self, optimizer):
+             pass
+         def step(self, optimizer):
+             optimizer.step()
+         def update(self):
+             pass
+
+
+ # exclude extremely large displacements
+ MAX_FLOW = 400
+ SUM_FREQ = 100
+ VAL_FREQ = 5000
+
+
+ def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8, max_flow=MAX_FLOW):
+     """ Loss function defined over sequence of flow predictions """
+
+     n_predictions = len(flow_preds)
+     flow_loss = 0.0
+
+     # exclude invalid pixels and extremely large displacements
+     mag = torch.sum(flow_gt**2, dim=1).sqrt()
+     valid = (valid >= 0.5) & (mag < max_flow)
+
+     for i in range(n_predictions):
+         i_weight = gamma**(n_predictions - i - 1)
+         i_loss = (flow_preds[i] - flow_gt).abs()
+         flow_loss += i_weight * (valid[:, None] * i_loss).mean()
+
+     epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
+     epe = epe.view(-1)[valid.view(-1)]
+
+     metrics = {
+         'epe': epe.mean().item(),
+         '1px': (epe < 1).float().mean().item(),
+         '3px': (epe < 3).float().mean().item(),
+         '5px': (epe < 5).float().mean().item(),
+     }
+
+     return flow_loss, metrics
+
+
+ def count_parameters(model):
+     return sum(p.numel() for p in model.parameters() if p.requires_grad)
+
+
+ def fetch_optimizer(args, model):
+     """ Create the optimizer and learning rate scheduler """
+     optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
+
+     scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100,
+         pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
+
+     return optimizer, scheduler
+
+
+ class Logger:
+     def __init__(self, model, scheduler):
+         self.model = model
+         self.scheduler = scheduler
+         self.total_steps = 0
+         self.running_loss = {}
+         self.writer = None
+
+     def _print_training_status(self):
+         metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
+         training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0])
+         metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
+
+         # print the training status
+         print(training_str + metrics_str)
+
+         if self.writer is None:
+             self.writer = SummaryWriter()
+
+         for k in self.running_loss:
+             self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps)
+             self.running_loss[k] = 0.0
+
+     def push(self, metrics):
+         self.total_steps += 1
+
+         for key in metrics:
+             if key not in self.running_loss:
+                 self.running_loss[key] = 0.0
+
+             self.running_loss[key] += metrics[key]
+
+         if self.total_steps % SUM_FREQ == SUM_FREQ-1:
+             self._print_training_status()
+             self.running_loss = {}
+
+     def write_dict(self, results):
+         if self.writer is None:
+             self.writer = SummaryWriter()
+
+         for key in results:
+             self.writer.add_scalar(key, results[key], self.total_steps)
+
+     def close(self):
+         self.writer.close()
+
+
+ def train(args):
+
+     model = nn.DataParallel(RAFT(args), device_ids=args.gpus)
+     print("Parameter Count: %d" % count_parameters(model))
+
+     if args.restore_ckpt is not None:
+         model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
+
+     model.cuda()
+     model.train()
+
+     if args.stage != 'chairs':
+         model.module.freeze_bn()
+
+     train_loader = datasets.fetch_dataloader(args)
+     optimizer, scheduler = fetch_optimizer(args, model)
+
+     total_steps = 0
+     scaler = GradScaler(enabled=args.mixed_precision)
+     logger = Logger(model, scheduler)
+
+     VAL_FREQ = 5000
+     add_noise = True
+
+     should_keep_training = True
+     while should_keep_training:
+
+         for i_batch, data_blob in enumerate(train_loader):
+             optimizer.zero_grad()
+             image1, image2, flow, valid = [x.cuda() for x in data_blob]
+
+             if args.add_noise:
+                 stdv = np.random.uniform(0.0, 5.0)
+                 image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
+                 image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)
+
+             flow_predictions = model(image1, image2, iters=args.iters)
+
+             loss, metrics = sequence_loss(flow_predictions, flow, valid, args.gamma)
+             scaler.scale(loss).backward()
+             scaler.unscale_(optimizer)
+             torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
+
+             scaler.step(optimizer)
+             scheduler.step()
+             scaler.update()
+
+             logger.push(metrics)
+
+             if total_steps % VAL_FREQ == VAL_FREQ - 1:
+                 PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name)
+                 torch.save(model.state_dict(), PATH)
+
+                 results = {}
+                 for val_dataset in args.validation:
+                     if val_dataset == 'chairs':
+                         results.update(evaluate.validate_chairs(model.module))
+                     elif val_dataset == 'sintel':
+                         results.update(evaluate.validate_sintel(model.module))
+                     elif val_dataset == 'kitti':
+                         results.update(evaluate.validate_kitti(model.module))
+
+                 logger.write_dict(results)
+
+                 model.train()
+                 if args.stage != 'chairs':
+                     model.module.freeze_bn()
+
+             total_steps += 1
+
+             if total_steps > args.num_steps:
+                 should_keep_training = False
+                 break
+
+     logger.close()
+     PATH = 'checkpoints/%s.pth' % args.name
+     torch.save(model.state_dict(), PATH)
+
+     return PATH
+
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--name', default='raft', help="name your experiment")
+     parser.add_argument('--stage', help="determines which dataset to use for training")
+     parser.add_argument('--restore_ckpt', help="restore checkpoint")
+     parser.add_argument('--small', action='store_true', help='use small model')
+     parser.add_argument('--validation', type=str, nargs='+')
+
+     parser.add_argument('--lr', type=float, default=0.00002)
+     parser.add_argument('--num_steps', type=int, default=100000)
+     parser.add_argument('--batch_size', type=int, default=6)
+     parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
+     parser.add_argument('--gpus', type=int, nargs='+', default=[0,1])
+     parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
+
+     parser.add_argument('--iters', type=int, default=12)
+     parser.add_argument('--wdecay', type=float, default=.00005)
+     parser.add_argument('--epsilon', type=float, default=1e-8)
+     parser.add_argument('--clip', type=float, default=1.0)
+     parser.add_argument('--dropout', type=float, default=0.0)
+     parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
+     parser.add_argument('--add_noise', action='store_true')
+     args = parser.parse_args()
+
+     torch.manual_seed(1234)
+     np.random.seed(1234)
+
+     if not os.path.isdir('checkpoints'):
+         os.mkdir('checkpoints')
+
+     train(args)
causalvideovae/eval/RAFT/train_standard.sh ADDED
@@ -0,0 +1,6 @@
+ #!/bin/bash
+ mkdir -p checkpoints
+ python -u train.py --name raft-chairs --stage chairs --validation chairs --gpus 0 1 --num_steps 100000 --batch_size 10 --lr 0.0004 --image_size 368 496 --wdecay 0.0001
+ python -u train.py --name raft-things --stage things --validation sintel --restore_ckpt checkpoints/raft-chairs.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 400 720 --wdecay 0.0001
+ python -u train.py --name raft-sintel --stage sintel --validation sintel --restore_ckpt checkpoints/raft-things.pth --gpus 0 1 --num_steps 100000 --batch_size 6 --lr 0.000125 --image_size 368 768 --wdecay 0.00001 --gamma=0.85
+ python -u train.py --name raft-kitti --stage kitti --validation kitti --restore_ckpt checkpoints/raft-sintel.pth --gpus 0 1 --num_steps 50000 --batch_size 6 --lr 0.0001 --image_size 288 960 --wdecay 0.00001 --gamma=0.85
causalvideovae/eval/cal_flolpips.py ADDED
@@ -0,0 +1,83 @@
+ import numpy as np
+ import torch
+ from tqdm import tqdm
+ import math
+ from einops import rearrange
+ import sys
+ sys.path.append(".")
+ from flolpips.pwcnet import Network as PWCNet
+ from flolpips.flolpips import FloLPIPS
+
+ loss_fn = FloLPIPS(net='alex', version='0.1').eval().requires_grad_(False)
+ flownet = PWCNet().eval().requires_grad_(False)
+
+ def trans(x):
+     return x
+
+
+ def calculate_flolpips(videos1, videos2, device):
+     global loss_fn, flownet
+
+     print("calculate_flolpips...")
+     loss_fn = loss_fn.to(device)
+     flownet = flownet.to(device)
+
+     if videos1.shape != videos2.shape:
+         print("Warning: the shapes of the videos are not equal.")
+         min_frames = min(videos1.shape[1], videos2.shape[1])
+         videos1 = videos1[:, :min_frames]
+         videos2 = videos2[:, :min_frames]
+
+     videos1 = trans(videos1)
+     videos2 = trans(videos2)
+
+     flolpips_results = []
+     for video_num in tqdm(range(videos1.shape[0])):
+         video1 = videos1[video_num].to(device)
+         video2 = videos2[video_num].to(device)
+         frames_rec = video1[:-1]
+         frames_rec_next = video1[1:]
+         frames_gt = video2[:-1]
+         frames_gt_next = video2[1:]
+         t, c, h, w = frames_gt.shape
+         flow_gt = flownet(frames_gt, frames_gt_next)
+         flow_dis = flownet(frames_rec, frames_rec_next)
+         flow_diff = flow_gt - flow_dis
+         flolpips = loss_fn.forward(frames_gt, frames_rec, flow_diff, normalize=True)
+         flolpips_results.append(flolpips.cpu().numpy().tolist())
+
+     flolpips_results = np.array(flolpips_results)  # [batch_size, num_frames]
+     flolpips = {}
+     flolpips_std = {}
+
+     for clip_timestamp in range(flolpips_results.shape[1]):
+         flolpips[clip_timestamp] = np.mean(flolpips_results[:,clip_timestamp], axis=-1)
+         flolpips_std[clip_timestamp] = np.std(flolpips_results[:,clip_timestamp], axis=-1)
+
+     result = {
+         "value": flolpips,
+         "value_std": flolpips_std,
+         "video_setting": video1.shape,
+         "video_setting_name": "time, channel, height, width",
+         "result": flolpips_results,
+         "details": flolpips_results.tolist()
+     }
+
+     return result
+
+ # test code / usage example
+
+ def main():
+     NUMBER_OF_VIDEOS = 8
+     VIDEO_LENGTH = 50
+     CHANNEL = 3
+     SIZE = 64
+     videos1 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
+     videos2 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
+
+     import json
+     result = calculate_flolpips(videos1, videos2, "cuda:0")
+     print(json.dumps(result, indent=4))
+
+ if __name__ == "__main__":
+     main()
causalvideovae/eval/cal_fvd.py ADDED
@@ -0,0 +1,85 @@
+ import numpy as np
+ import torch
+ from tqdm import tqdm
+
+ def trans(x):
+     # if greyscale images, add channel
+     if x.shape[-3] == 1:
+         x = x.repeat(1, 1, 3, 1, 1)
+
+     # permute BTCHW -> BCTHW
+     x = x.permute(0, 2, 1, 3, 4)
+
+     return x
+
+ def calculate_fvd(videos1, videos2, device, method='styleganv'):
+
+     if method == 'styleganv':
+         from fvd.styleganv.fvd import get_fvd_feats, frechet_distance, load_i3d_pretrained
+     elif method == 'videogpt':
+         from fvd.videogpt.fvd import load_i3d_pretrained
+         from fvd.videogpt.fvd import get_fvd_logits as get_fvd_feats
+         from fvd.videogpt.fvd import frechet_distance
+
+     print("calculate_fvd...")
+
+     # videos [batch_size, timestamps, channel, h, w]
+
+     assert videos1.shape == videos2.shape
+
+     i3d = load_i3d_pretrained(device=device)
+     fvd_results = []
+
+     # support grayscale input, if grayscale -> channel*3
+     # BTCHW -> BCTHW
+     # videos -> [batch_size, channel, timestamps, h, w]
+
+     videos1 = trans(videos1)
+     videos2 = trans(videos2)
+
+     fvd_results = {}
+
+     # to calculate FVD, each clip must contain at least 10 frames
+     for clip_timestamp in tqdm(range(10, videos1.shape[-3]+1)):
+
+         # get a video clip
+         # videos_clip [batch_size, channel, timestamps[:clip], h, w]
+         videos_clip1 = videos1[:, :, : clip_timestamp]
+         videos_clip2 = videos2[:, :, : clip_timestamp]
+
+         # get FVD features
+         feats1 = get_fvd_feats(videos_clip1, i3d=i3d, device=device)
+         feats2 = get_fvd_feats(videos_clip2, i3d=i3d, device=device)
+
+         # calculate FVD over the first clip_timestamp frames
+         fvd_results[clip_timestamp] = frechet_distance(feats1, feats2)
+
+     result = {
+         "value": fvd_results,
+         "video_setting": videos1.shape,
+         "video_setting_name": "batch_size, channel, time, height, width",
+     }
+
+     return result
+
+ # test code / usage example
+
+ def main():
+     NUMBER_OF_VIDEOS = 8
+     VIDEO_LENGTH = 50
+     CHANNEL = 3
+     SIZE = 64
+     videos1 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
+     videos2 = torch.ones(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
+     device = torch.device("cuda")
+     # device = torch.device("cpu")
+
+     import json
+     result = calculate_fvd(videos1, videos2, device, method='videogpt')
+     print(json.dumps(result, indent=4))
+
+     result = calculate_fvd(videos1, videos2, device, method='styleganv')
+     print(json.dumps(result, indent=4))
+
+ if __name__ == "__main__":
+     main()
causalvideovae/eval/cal_lpips.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+ from tqdm import tqdm
4
+ import math
5
+
6
+ import torch
7
+ import lpips
8
+
9
+ spatial = True # Return a spatial map of perceptual distance.
10
+
11
+ # Linearly calibrated models (LPIPS)
12
+ loss_fn = lpips.LPIPS(net='alex', spatial=spatial) # Can also set net = 'squeeze' or 'vgg'
13
+ # loss_fn = lpips.LPIPS(net='alex', spatial=spatial, lpips=False) # Can also set net = 'squeeze' or 'vgg'
14
+
15
+ def trans(x):
16
+ # if greyscale images add channel
17
+ if x.shape[-3] == 1:
18
+ x = x.repeat(1, 1, 3, 1, 1)
19
+
20
+ # value range [0, 1] -> [-1, 1]
21
+ x = x * 2 - 1
22
+
23
+ return x
24
+
25
+ def calculate_lpips(videos1, videos2, device):
26
+ # image should be RGB, IMPORTANT: normalized to [-1,1]
27
+ print("calculate_lpips...")
28
+
29
+ assert videos1.shape == videos2.shape
30
+
31
+ # videos [batch_size, timestamps, channel, h, w]
32
+
33
+ # support grayscale input, if grayscale -> channel*3
34
+ # value range [0, 1] -> [-1, 1]
35
+ videos1 = trans(videos1)
36
+ videos2 = trans(videos2)
37
+
38
+ lpips_results = []
39
+
40
+ for video_num in tqdm(range(videos1.shape[0])):
41
+ # get a video
42
+ # video [timestamps, channel, h, w]
43
+ video1 = videos1[video_num]
44
+ video2 = videos2[video_num]
45
+
46
+ lpips_results_of_a_video = []
47
+ for clip_timestamp in range(len(video1)):
48
+ # get a img
49
+ # img [timestamps[x], channel, h, w]
50
+ # img [channel, h, w] tensor
51
+
52
+ img1 = video1[clip_timestamp].unsqueeze(0).to(device)
53
+ img2 = video2[clip_timestamp].unsqueeze(0).to(device)
54
+
55
+ loss_fn.to(device)
56
+
57
+ # calculate lpips of a video
58
+ lpips_results_of_a_video.append(loss_fn.forward(img1, img2).mean().detach().cpu().tolist())
59
+ lpips_results.append(lpips_results_of_a_video)
60
+
61
+ lpips_results = np.array(lpips_results)
62
+
63
+ lpips = {}
64
+ lpips_std = {}
65
+
66
+ for clip_timestamp in range(len(video1)):
67
+ lpips[clip_timestamp] = np.mean(lpips_results[:,clip_timestamp])
68
+ lpips_std[clip_timestamp] = np.std(lpips_results[:,clip_timestamp])
69
+
70
+
71
+ result = {
72
+ "value": lpips,
73
+ "value_std": lpips_std,
74
+ "video_setting": video1.shape,
75
+ "video_setting_name": "time, channel, heigth, width",
76
+ }
77
+
78
+ return result
79
+
80
+ # test code / using example
81
+
82
+ def main():
83
+ NUMBER_OF_VIDEOS = 8
84
+ VIDEO_LENGTH = 50
85
+ CHANNEL = 3
86
+ SIZE = 64
87
+ videos1 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
88
+ videos2 = torch.ones(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
89
+ device = torch.device("cuda")
90
+ # device = torch.device("cpu")
91
+
92
+ import json
93
+ result = calculate_lpips(videos1, videos2, device)
94
+ print(json.dumps(result, indent=4))
95
+
96
+ if __name__ == "__main__":
97
+ main()
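Note: `calculate_lpips` returns per-frame means and standard deviations over the batch. A minimal sketch of collapsing the per-frame means into one score, assuming `result` is the dictionary returned above:

    import numpy as np
    per_frame_mean = result["value"]   # {frame_index: mean LPIPS over the batch at that frame}
    overall_lpips = float(np.mean(list(per_frame_mean.values())))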
causalvideovae/eval/cal_psnr.py ADDED
@@ -0,0 +1,97 @@
1
+ import numpy as np
2
+ import torch
3
+ from tqdm import tqdm
4
+ import math
5
+
6
+ def img_psnr_cuda(img1, img2):
7
+ # [0,1]
8
+ # compute mse
9
+ # mse = np.mean((img1-img2)**2)
10
+ mse = torch.mean((img1 / 1.0 - img2 / 1.0) ** 2)
11
+ # compute psnr
12
+ if mse < 1e-10:
13
+ return 100
14
+ psnr = 20 * torch.log10(1 / torch.sqrt(mse))
15
+ return psnr
16
+
17
+
18
+ def img_psnr(img1, img2):
19
+ # [0,1]
20
+ # compute mse
21
+ # mse = np.mean((img1-img2)**2)
22
+ mse = np.mean((img1 / 1.0 - img2 / 1.0) ** 2)
23
+ # compute psnr
24
+ if mse < 1e-10:
25
+ return 100
26
+ psnr = 20 * math.log10(1 / math.sqrt(mse))
27
+ return psnr
28
+
29
+
30
+ def trans(x):
31
+ return x
32
+
33
+ def calculate_psnr(videos1, videos2):
34
+ print("calculate_psnr...")
35
+
36
+ # videos [batch_size, timestamps, channel, h, w]
37
+
38
+ assert videos1.shape == videos2.shape
39
+
40
+ videos1 = trans(videos1)
41
+ videos2 = trans(videos2)
42
+
43
+ psnr_results = []
44
+
45
+ for video_num in tqdm(range(videos1.shape[0])):
46
+ # get a video
47
+ # video [timestamps, channel, h, w]
48
+ video1 = videos1[video_num]
49
+ video2 = videos2[video_num]
50
+
51
+ psnr_results_of_a_video = []
52
+ for clip_timestamp in range(len(video1)):
53
+ # get a img
54
+ # img [timestamps[x], channel, h, w]
55
+ # img [channel, h, w] numpy
56
+
57
+ img1 = video1[clip_timestamp].numpy()
58
+ img2 = video2[clip_timestamp].numpy()
59
+
60
+ # calculate psnr of a video
61
+ psnr_results_of_a_video.append(img_psnr(img1, img2))
62
+
63
+ psnr_results.append(psnr_results_of_a_video)
64
+
65
+ psnr_results = np.array(psnr_results) # [batch_size, num_frames]
66
+ psnr = {}
67
+ psnr_std = {}
68
+
69
+ for clip_timestamp in range(len(video1)):
70
+ psnr[clip_timestamp] = np.mean(psnr_results[:,clip_timestamp])
71
+ psnr_std[clip_timestamp] = np.std(psnr_results[:,clip_timestamp])
72
+
73
+ result = {
74
+ "value": psnr,
75
+ "value_std": psnr_std,
76
+ "video_setting": video1.shape,
77
+ "video_setting_name": "time, channel, heigth, width",
78
+ }
79
+
80
+ return result
81
+
82
+ # test code / using example
83
+
84
+ def main():
85
+ NUMBER_OF_VIDEOS = 8
86
+ VIDEO_LENGTH = 50
87
+ CHANNEL = 3
88
+ SIZE = 64
89
+ videos1 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
90
+ videos2 = torch.zeros(NUMBER_OF_VIDEOS, VIDEO_LENGTH, CHANNEL, SIZE, SIZE, requires_grad=False)
91
+
92
+ import json
93
+ result = calculate_psnr(videos1, videos2)
94
+ print(json.dumps(result, indent=4))
95
+
96
+ if __name__ == "__main__":
97
+ main()
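A quick sanity check of the PSNR formula used above: images are assumed to lie in [0, 1], so a constant per-pixel error of 0.1 gives MSE = 0.01 and PSNR = 20 * log10(1 / sqrt(0.01)) = 20 dB. A minimal sketch, assuming this file is importable as `cal_psnr`:

    import numpy as np
    from cal_psnr import img_psnr  # assumed import path

    img_a = np.zeros((3, 64, 64))
    img_b = np.full((3, 64, 64), 0.1)
    print(img_psnr(img_a, img_b))  # ~20.0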
causalvideovae/eval/cal_we.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
causalvideovae/eval/eval_clip_score.py ADDED
@@ -0,0 +1,225 @@
1
+ """Calculates the CLIP Scores
2
+
3
+ The CLIP model is a contrastively learned language-image model. It has
4
+ an image encoder and a text encoder, and it can measure similarity
5
+ across the two modalities. Please find more information at
6
+ https://github.com/openai/CLIP.
7
+
8
+ The CLIP Score measures the Cosine Similarity between two embedded features.
9
+ This repository utilizes the pretrained CLIP Model to calculate
10
+ the mean of the cosine similarities.
11
+
12
+ See --help to see further details.
13
+
14
+ Code adapted from https://github.com/mseitzer/pytorch-fid and https://github.com/openai/CLIP.
15
+
16
+ Copyright 2023 The Hong Kong Polytechnic University
17
+
18
+ Licensed under the Apache License, Version 2.0 (the "License");
19
+ you may not use this file except in compliance with the License.
20
+ You may obtain a copy of the License at
21
+
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+
24
+ Unless required by applicable law or agreed to in writing, software
25
+ distributed under the License is distributed on an "AS IS" BASIS,
26
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
27
+ See the License for the specific language governing permissions and
28
+ limitations under the License.
29
+ """
30
+ import os
31
+ import os.path as osp
32
+ from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
33
+
34
+ import clip
35
+ import torch
36
+ from PIL import Image
37
+ from torch.utils.data import Dataset, DataLoader
38
+
39
+ try:
40
+ from tqdm import tqdm
41
+ except ImportError:
42
+ # If tqdm is not available, provide a mock version of it
43
+ def tqdm(x):
44
+ return x
45
+
46
+
47
+ IMAGE_EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'pgm', 'png', 'ppm',
48
+ 'tif', 'tiff', 'webp'}
49
+
50
+ TEXT_EXTENSIONS = {'txt'}
51
+
52
+
53
+ class DummyDataset(Dataset):
54
+
55
+ FLAGS = ['img', 'txt']
56
+ def __init__(self, real_path, generated_path,
57
+ real_flag: str = 'img',
58
+ generated_flag: str = 'img',
59
+ transform = None,
60
+ tokenizer = None) -> None:
61
+ super().__init__()
62
+ assert real_flag in self.FLAGS and generated_flag in self.FLAGS, \
63
+ 'CLIP Score only supports modalities {}, but got {} and {}'.format(
64
+ self.FLAGS, real_flag, generated_flag
65
+ )
66
+ self.real_folder = self._combine_without_prefix(real_path)
67
+ self.real_flag = real_flag
68
+ self.fake_folder = self._combine_without_prefix(generated_path)
69
+ self.generated_flag = generated_flag
70
+ self.transform = transform
71
+ self.tokenizer = tokenizer
72
+ # assert self._check()
73
+
74
+ def __len__(self):
75
+ return len(self.real_folder)
76
+
77
+ def __getitem__(self, index):
78
+ if index >= len(self):
79
+ raise IndexError
80
+ real_path = self.real_folder[index]
81
+ generated_path = self.fake_folder[index]
82
+ real_data = self._load_modality(real_path, self.real_flag)
83
+ fake_data = self._load_modality(generated_path, self.generated_flag)
84
+
85
+ sample = dict(real=real_data, fake=fake_data)
86
+ return sample
87
+
88
+ def _load_modality(self, path, modality):
89
+ if modality == 'img':
90
+ data = self._load_img(path)
91
+ elif modality == 'txt':
92
+ data = self._load_txt(path)
93
+ else:
94
+ raise TypeError("Got unexpected modality: {}".format(modality))
95
+ return data
96
+
97
+ def _load_img(self, path):
98
+ img = Image.open(path)
99
+ if self.transform is not None:
100
+ img = self.transform(img)
101
+ return img
102
+
103
+ def _load_txt(self, path):
104
+ with open(path, 'r') as fp:
105
+ data = fp.read()
106
+ fp.close()
107
+ if self.tokenizer is not None:
108
+ data = self.tokenizer(data).squeeze()
109
+ return data
110
+
111
+ def _check(self):
112
+ for idx in range(len(self)):
113
+ real_name = self.real_folder[idx].split('.')
114
+ fake_name = self.fake_folder[idx].split('.')
115
+ if fake_name != real_name:
116
+ return False
117
+ return True
118
+
119
+ def _combine_without_prefix(self, folder_path, prefix='.'):
120
+ folder = []
121
+ for name in os.listdir(folder_path):
122
+ if name[0] == prefix:
123
+ continue
124
+ folder.append(osp.join(folder_path, name))
125
+ folder.sort()
126
+ return folder
127
+
128
+
129
+ @torch.no_grad()
130
+ def calculate_clip_score(dataloader, model, real_flag, generated_flag):
131
+ score_acc = 0.
132
+ sample_num = 0.
133
+ logit_scale = model.logit_scale.exp()
134
+ for batch_data in tqdm(dataloader):
135
+ real = batch_data['real']
136
+ real_features = forward_modality(model, real, real_flag)
137
+ fake = batch_data['fake']
138
+ fake_features = forward_modality(model, fake, generated_flag)
139
+
140
+ # normalize features
141
+ real_features = real_features / real_features.norm(dim=1, keepdim=True).to(torch.float32)
142
+ fake_features = fake_features / fake_features.norm(dim=1, keepdim=True).to(torch.float32)
143
+
144
+ # calculate scores
145
+ # score = logit_scale * real_features @ fake_features.t()
146
+ # score_acc += torch.diag(score).sum()
147
+ score = logit_scale * (fake_features * real_features).sum()
148
+ score_acc += score
149
+ sample_num += real.shape[0]
150
+
151
+ return score_acc / sample_num
152
+
153
+
154
+ def forward_modality(model, data, flag):
155
+ device = next(model.parameters()).device
156
+ if flag == 'img':
157
+ features = model.encode_image(data.to(device))
158
+ elif flag == 'txt':
159
+ features = model.encode_text(data.to(device))
160
+ else:
161
+ raise TypeError
162
+ return features
163
+
164
+
165
+ def main():
166
+ parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
167
+ parser.add_argument('--batch-size', type=int, default=50,
168
+ help='Batch size to use')
169
+ parser.add_argument('--clip-model', type=str, default='ViT-B/32',
170
+ help='CLIP model to use')
171
+ parser.add_argument('--num-workers', type=int, default=8,
172
+ help=('Number of processes to use for data loading. '
173
+ 'Defaults to `min(8, num_cpus)`'))
174
+ parser.add_argument('--device', type=str, default=None,
175
+ help='Device to use. Like cuda, cuda:0 or cpu')
176
+ parser.add_argument('--real_flag', type=str, default='img',
177
+ help=('The modality of real path. '
178
+ 'Default to img'))
179
+ parser.add_argument('--generated_flag', type=str, default='txt',
180
+ help=('The modality of generated path. '
181
+ 'Default to txt'))
182
+ parser.add_argument('--real_path', type=str,
183
+ help=('Paths to the real images or '
184
+ 'to .npz statistic files'))
185
+ parser.add_argument('--generated_path', type=str,
186
+ help=('Paths to the generated images or '
187
+ 'to .npz statistic files'))
188
+ args = parser.parse_args()
189
+
190
+ if args.device is None:
191
+ device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')
192
+ else:
193
+ device = torch.device(args.device)
194
+
195
+ if args.num_workers is None:
196
+ try:
197
+ num_cpus = len(os.sched_getaffinity(0))
198
+ except AttributeError:
199
+ # os.sched_getaffinity is not available under Windows, use
200
+ # os.cpu_count instead (which may not return the *available* number
201
+ # of CPUs).
202
+ num_cpus = os.cpu_count()
203
+
204
+ num_workers = min(num_cpus, 8) if num_cpus is not None else 0
205
+ else:
206
+ num_workers = args.num_workers
207
+
208
+ print('Loading CLIP model: {}'.format(args.clip_model))
209
+ model, preprocess = clip.load(args.clip_model, device=device)
210
+
211
+ dataset = DummyDataset(args.real_path, args.generated_path,
212
+ args.real_flag, args.generated_flag,
213
+ transform=preprocess, tokenizer=clip.tokenize)
214
+ dataloader = DataLoader(dataset, args.batch_size,
215
+ num_workers=num_workers, pin_memory=True)
216
+
217
+ print('Calculating CLIP Score:')
218
+ clip_score = calculate_clip_score(dataloader, model,
219
+ args.real_flag, args.generated_flag)
220
+ clip_score = clip_score.cpu().item()
221
+ print('CLIP Score: ', clip_score)
222
+
223
+
224
+ if __name__ == '__main__':
225
+ main()
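At its core the score above is the logit-scaled cosine similarity between CLIP image and text embeddings. A minimal single-pair sketch of the same computation, assuming the `clip` package is installed and using a placeholder image path and caption:

    import clip
    import torch
    from PIL import Image

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load("ViT-B/32", device=device)
    image = preprocess(Image.open("image.png")).unsqueeze(0).to(device)  # hypothetical file
    text = clip.tokenize(["a photo of a cat"]).to(device)                # hypothetical caption
    with torch.no_grad():
        img_feat = model.encode_image(image)
        txt_feat = model.encode_text(text)
    img_feat = img_feat / img_feat.norm(dim=1, keepdim=True)
    txt_feat = txt_feat / txt_feat.norm(dim=1, keepdim=True)
    score = model.logit_scale.exp() * (img_feat * txt_feat).sum()
    print(float(score))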
causalvideovae/eval/eval_common_metric.py ADDED
@@ -0,0 +1,228 @@
1
+ """Calculates the CLIP Scores
2
+
3
+ The CLIP model is a contrasitively learned language-image model. There is
4
+ an image encoder and a text encoder. It is believed that the CLIP model could
5
+ measure the similarity of cross modalities. Please find more information from
6
+ https://github.com/openai/CLIP.
7
+
8
+ The CLIP Score measures the Cosine Similarity between two embedded features.
9
+ This repository utilizes the pretrained CLIP Model to calculate
10
+ the mean average of cosine similarities.
11
+
12
+ See --help to see further details.
13
+
14
+ Code adapted from https://github.com/mseitzer/pytorch-fid and https://github.com/openai/CLIP.
15
+
16
+ Copyright 2023 The Hong Kong Polytechnic University
17
+
18
+ Licensed under the Apache License, Version 2.0 (the "License");
19
+ you may not use this file except in compliance with the License.
20
+ You may obtain a copy of the License at
21
+
22
+ http://www.apache.org/licenses/LICENSE-2.0
23
+
24
+ Unless required by applicable law or agreed to in writing, software
25
+ distributed under the License is distributed on an "AS IS" BASIS,
26
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
27
+ See the License for the specific language governing permissions and
28
+ limitations under the License.
29
+ """
30
+
31
+ import os
32
+ import os.path as osp
33
+ from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
34
+ import numpy as np
35
+ import torch
36
+ from torch.utils.data import Dataset, DataLoader, Subset
37
+ from decord import VideoReader, cpu
38
+ import random
39
+ from pytorchvideo.transforms import ShortSideScale
40
+ from torchvision.io import read_video
41
+ from torchvision.transforms import Lambda, Compose
42
+ from torchvision.transforms._transforms_video import CenterCropVideo
43
+ from cal_lpips import calculate_lpips
44
+ from cal_fvd import calculate_fvd
45
+ from cal_psnr import calculate_psnr
46
+ #from cal_flolpips import calculate_flolpips
47
+ from cal_ssim import calculate_ssim
48
+ from cal_mse import calculate_mse
49
+
50
+ try:
51
+ from tqdm import tqdm
52
+ except ImportError:
53
+ # If tqdm is not available, provide a mock version of it
54
+ def tqdm(x):
55
+ return x
56
+
57
+ class VideoDataset(Dataset):
58
+ def __init__(self,
59
+ real_video_dir,
60
+ generated_video_dir,
61
+ num_frames,
62
+ sample_rate = 1,
63
+ crop_size=None,
64
+ resolution=128,
65
+ ) -> None:
66
+ super().__init__()
67
+ self.real_video_files = []
68
+ self.generated_video_files = self._combine_without_prefix(generated_video_dir)
69
+ for video_file in self.generated_video_files:
70
+ filename = os.path.basename(video_file)
71
+ self.real_video_files.append(os.path.join(real_video_dir, filename))
72
+ self.num_frames = num_frames
73
+ self.sample_rate = sample_rate
74
+ self.crop_size = crop_size
75
+ self.short_size = resolution
76
+
77
+
78
+ def __len__(self):
79
+ return len(self.real_video_files)
80
+
81
+ def __getitem__(self, index):
82
+ if index >= len(self):
83
+ raise IndexError
84
+ real_video_file = self.real_video_files[index]
85
+ generated_video_file = self.generated_video_files[index]
86
+ real_video_tensor = self._load_video(real_video_file, self.sample_rate)
87
+ generated_video_tensor = self._load_video(generated_video_file, 1)
88
+ return {'real': real_video_tensor, 'generated':generated_video_tensor }
89
+
90
+
91
+ def _load_video(self, video_path, sample_rate):
92
+ num_frames = self.num_frames
93
+ decord_vr = VideoReader(video_path, ctx=cpu(0))
94
+ total_frames = len(decord_vr)
95
+ sample_frames_len = sample_rate * num_frames
96
+
97
+ if total_frames >= sample_frames_len:
98
+ s = 0
99
+ e = s + sample_frames_len
100
+ num_frames = num_frames
101
+ else:
102
+ s = 0
103
+ e = total_frames
104
+ num_frames = int(total_frames / sample_frames_len * num_frames)
105
+ print(f'sample_frames_len {sample_frames_len}, can only sample {num_frames * sample_rate}', video_path,
106
+ total_frames)
107
+
108
+
109
+ frame_id_list = np.linspace(s, e - 1, num_frames, dtype=int)
110
+ video_data = decord_vr.get_batch(frame_id_list).asnumpy()
111
+ video_data = torch.from_numpy(video_data)
112
+ video_data = video_data.permute(0, 3, 1, 2) # (T, H, W, C) -> (T, C, H, W)
113
+ return _preprocess(video_data, short_size=self.short_size, crop_size = self.crop_size)
114
+
115
+
116
+ def _combine_without_prefix(self, folder_path, prefix='.'):
117
+ folder = []
118
+ os.makedirs(folder_path, exist_ok=True)
119
+ for name in os.listdir(folder_path):
120
+ if name[0] == prefix:
121
+ continue
122
+ if osp.isfile(osp.join(folder_path, name)):
123
+ folder.append(osp.join(folder_path, name))
124
+ folder.sort()
125
+ return folder
126
+
127
+ def _preprocess(video_data, short_size=128, crop_size=None):
128
+ transform = Compose(
129
+ [
130
+ Lambda(lambda x: x / 255.0),
131
+ ]
132
+ )
133
+ video_outputs = transform(video_data)
134
+ # video_outputs = torch.unsqueeze(video_outputs, 0) # (bz,c,t,h,w)
135
+ return video_outputs
136
+
137
+
138
+ def calculate_common_metric(args, dataloader, device):
139
+
140
+ score_list = []
141
+ for batch_data in tqdm(dataloader): # {'real': real_video_tensor, 'generated':generated_video_tensor }
142
+ real_videos = batch_data['real']
143
+ generated_videos = batch_data['generated']
144
+
145
+ assert real_videos.shape[2] == generated_videos.shape[2]
146
+ if args.metric == 'fvd':
147
+ tmp_list = list(calculate_fvd(real_videos, generated_videos, args.device, method=args.fvd_method)['value'].values())
148
+ elif args.metric == 'ssim':
149
+ tmp_list = list(calculate_ssim(real_videos, generated_videos)['value'].values())
150
+ elif args.metric == 'psnr':
151
+ tmp_list = list(calculate_psnr(real_videos, generated_videos)['value'].values())
152
+ elif args.metric == 'mse':
153
+ tmp_list = list(calculate_mse(real_videos, generated_videos)['value'].values())
154
+ # elif args.metric == 'flolpips':
155
+ # result = calculate_flolpips(real_videos, generated_videos, args.device)
156
+ # tmp_list = list(result['value'].values())
157
+ elif args.metric == 'wraperror':
158
+ result = calculate_wraperror(real_videos, generated_videos, args.device) # note: calculate_wraperror is not imported above, and 'wraperror' is not among the --metric choices
159
+ tmp_list = list(result['value'].values())
160
+ else:
161
+ tmp_list = list(calculate_lpips(real_videos, generated_videos, args.device)['value'].values())
162
+ score_list += tmp_list
163
+ return np.mean(score_list)
164
+
165
+ def main():
166
+ parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
167
+ parser.add_argument('--batch_size', type=int, default=2,
168
+ help='Batch size to use')
169
+ parser.add_argument('--real_video_dir', type=str,
170
+ help=('the path of the real videos'))
171
+ parser.add_argument('--generated_video_dir', type=str,
172
+ help=('the path of the generated videos'))
173
+ parser.add_argument('--device', type=str, default=None,
174
+ help='Device to use. Like cuda, cuda:0 or cpu')
175
+ parser.add_argument('--num_workers', type=int, default=8,
176
+ help=('Number of processes to use for data loading. '
177
+ 'Defaults to `min(8, num_cpus)`'))
178
+ parser.add_argument('--sample_fps', type=int, default=30)
179
+ parser.add_argument('--resolution', type=int, default=336)
180
+ parser.add_argument('--crop_size', type=int, default=None)
181
+ parser.add_argument('--num_frames', type=int, default=100)
182
+ parser.add_argument('--sample_rate', type=int, default=1)
183
+ parser.add_argument('--subset_size', type=int, default=None)
184
+ parser.add_argument("--metric", type=str, default="fvd",choices=['fvd','psnr','ssim','lpips', 'flolpips', 'mse'])
185
+ parser.add_argument("--fvd_method", type=str, default='styleganv',choices=['styleganv','videogpt'])
186
+
187
+
188
+ args = parser.parse_args()
189
+
190
+ if args.device is None:
191
+ device = torch.device('cuda:1' if (torch.cuda.is_available()) else 'cpu')
192
+ else:
193
+ device = torch.device(args.device)
194
+
195
+ if args.num_workers is None:
196
+ try:
197
+ num_cpus = len(os.sched_getaffinity(0))
198
+ except AttributeError:
199
+ # os.sched_getaffinity is not available under Windows, use
200
+ # os.cpu_count instead (which may not return the *available* number
201
+ # of CPUs).
202
+ num_cpus = os.cpu_count()
203
+
204
+ num_workers = min(num_cpus, 8) if num_cpus is not None else 0
205
+ else:
206
+ num_workers = args.num_workers
207
+
208
+
209
+ dataset = VideoDataset(args.real_video_dir,
210
+ args.generated_video_dir,
211
+ num_frames = args.num_frames,
212
+ sample_rate = args.sample_rate,
213
+ crop_size=args.crop_size,
214
+ resolution=args.resolution)
215
+
216
+ if args.subset_size:
217
+ indices = range(args.subset_size)
218
+ dataset = Subset(dataset, indices=indices)
219
+
220
+ dataloader = DataLoader(dataset, args.batch_size,
221
+ num_workers=num_workers, pin_memory=True)
222
+
223
+
224
+ metric_score = calculate_common_metric(args, dataloader,device)
225
+ print('metric: ', args.metric, " ",metric_score)
226
+
227
+ if __name__ == '__main__':
228
+ main()
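A minimal sketch of driving the dataset above directly, assuming both directories contain videos with matching filenames and that this file is importable as `eval_common_metric`; the directory paths are placeholders:

    from torch.utils.data import DataLoader
    from eval_common_metric import VideoDataset  # assumed import path

    dataset = VideoDataset("videos/real", "videos/generated",  # hypothetical directories
                           num_frames=17, sample_rate=1, crop_size=None, resolution=256)
    loader = DataLoader(dataset, batch_size=2, num_workers=4, pin_memory=True)
    batch = next(iter(loader))
    print(batch["real"].shape, batch["generated"].shape)  # (B, T, C, H, W), values in [0, 1]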
causalvideovae/eval/flolpips/pwcnet.py ADDED
@@ -0,0 +1,344 @@
1
+ #!/usr/bin/env python
2
+
3
+ import torch
4
+
5
+ import getopt
6
+ import math
7
+ import numpy
8
+ import os
9
+ import PIL
10
+ import PIL.Image
11
+ import sys
12
+
13
+ # try:
14
+ from .correlation import correlation # the custom cost volume layer
15
+ # except:
16
+ # sys.path.insert(0, './correlation'); import correlation # you should consider upgrading python
17
+ # end
18
+
19
+ ##########################################################
20
+
21
+ # assert(int(str('').join(torch.__version__.split('.')[0:2])) >= 13) # requires at least pytorch version 1.3.0
22
+
23
+ # torch.set_grad_enabled(False) # make sure to not compute gradients for computational performance
24
+
25
+ # torch.backends.cudnn.enabled = True # make sure to use cudnn for computational performance
26
+
27
+ # ##########################################################
28
+
29
+ # arguments_strModel = 'default' # 'default', or 'chairs-things'
30
+ # arguments_strFirst = './images/first.png'
31
+ # arguments_strSecond = './images/second.png'
32
+ # arguments_strOut = './out.flo'
33
+
34
+ # for strOption, strArgument in getopt.getopt(sys.argv[1:], '', [ strParameter[2:] + '=' for strParameter in sys.argv[1::2] ])[0]:
35
+ # if strOption == '--model' and strArgument != '': arguments_strModel = strArgument # which model to use
36
+ # if strOption == '--first' and strArgument != '': arguments_strFirst = strArgument # path to the first frame
37
+ # if strOption == '--second' and strArgument != '': arguments_strSecond = strArgument # path to the second frame
38
+ # if strOption == '--out' and strArgument != '': arguments_strOut = strArgument # path to where the output should be stored
39
+ # end
40
+
41
+ ##########################################################
42
+
43
+
44
+
45
+ def backwarp(tenInput, tenFlow):
46
+ backwarp_tenGrid = {}
47
+ backwarp_tenPartial = {}
48
+ if str(tenFlow.shape) not in backwarp_tenGrid:
49
+ tenHor = torch.linspace(-1.0 + (1.0 / tenFlow.shape[3]), 1.0 - (1.0 / tenFlow.shape[3]), tenFlow.shape[3]).view(1, 1, 1, -1).expand(-1, -1, tenFlow.shape[2], -1)
50
+ tenVer = torch.linspace(-1.0 + (1.0 / tenFlow.shape[2]), 1.0 - (1.0 / tenFlow.shape[2]), tenFlow.shape[2]).view(1, 1, -1, 1).expand(-1, -1, -1, tenFlow.shape[3])
51
+
52
+ backwarp_tenGrid[str(tenFlow.shape)] = torch.cat([ tenHor, tenVer ], 1).cuda()
53
+ # end
54
+
55
+ if str(tenFlow.shape) not in backwarp_tenPartial:
56
+ backwarp_tenPartial[str(tenFlow.shape)] = tenFlow.new_ones([ tenFlow.shape[0], 1, tenFlow.shape[2], tenFlow.shape[3] ])
57
+ # end
58
+
59
+ tenFlow = torch.cat([ tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0), tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0) ], 1)
60
+ tenInput = torch.cat([ tenInput, backwarp_tenPartial[str(tenFlow.shape)] ], 1)
61
+
62
+ tenOutput = torch.nn.functional.grid_sample(input=tenInput, grid=(backwarp_tenGrid[str(tenFlow.shape)] + tenFlow).permute(0, 2, 3, 1), mode='bilinear', padding_mode='zeros', align_corners=False)
63
+
64
+ tenMask = tenOutput[:, -1:, :, :]; tenMask[tenMask > 0.999] = 1.0; tenMask[tenMask < 1.0] = 0.0
65
+
66
+ return tenOutput[:, :-1, :, :] * tenMask
67
+ # end
68
+
69
+ ##########################################################
70
+
71
+ class Network(torch.nn.Module):
72
+ def __init__(self):
73
+ super(Network, self).__init__()
74
+
75
+ class Extractor(torch.nn.Module):
76
+ def __init__(self):
77
+ super(Extractor, self).__init__()
78
+
79
+ self.netOne = torch.nn.Sequential(
80
+ torch.nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=2, padding=1),
81
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
82
+ torch.nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1),
83
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
84
+ torch.nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1),
85
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
86
+ )
87
+
88
+ self.netTwo = torch.nn.Sequential(
89
+ torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=2, padding=1),
90
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
91
+ torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
92
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
93
+ torch.nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
94
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
95
+ )
96
+
97
+ self.netThr = torch.nn.Sequential(
98
+ torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1),
99
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
100
+ torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
101
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
102
+ torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
103
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
104
+ )
105
+
106
+ self.netFou = torch.nn.Sequential(
107
+ torch.nn.Conv2d(in_channels=64, out_channels=96, kernel_size=3, stride=2, padding=1),
108
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
109
+ torch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=3, stride=1, padding=1),
110
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
111
+ torch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=3, stride=1, padding=1),
112
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
113
+ )
114
+
115
+ self.netFiv = torch.nn.Sequential(
116
+ torch.nn.Conv2d(in_channels=96, out_channels=128, kernel_size=3, stride=2, padding=1),
117
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
118
+ torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
119
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
120
+ torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
121
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
122
+ )
123
+
124
+ self.netSix = torch.nn.Sequential(
125
+ torch.nn.Conv2d(in_channels=128, out_channels=196, kernel_size=3, stride=2, padding=1),
126
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
127
+ torch.nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, stride=1, padding=1),
128
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
129
+ torch.nn.Conv2d(in_channels=196, out_channels=196, kernel_size=3, stride=1, padding=1),
130
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
131
+ )
132
+ # end
133
+
134
+ def forward(self, tenInput):
135
+ tenOne = self.netOne(tenInput)
136
+ tenTwo = self.netTwo(tenOne)
137
+ tenThr = self.netThr(tenTwo)
138
+ tenFou = self.netFou(tenThr)
139
+ tenFiv = self.netFiv(tenFou)
140
+ tenSix = self.netSix(tenFiv)
141
+
142
+ return [ tenOne, tenTwo, tenThr, tenFou, tenFiv, tenSix ]
143
+ # end
144
+ # end
145
+
146
+ class Decoder(torch.nn.Module):
147
+ def __init__(self, intLevel):
148
+ super(Decoder, self).__init__()
149
+
150
+ intPrevious = [ None, None, 81 + 32 + 2 + 2, 81 + 64 + 2 + 2, 81 + 96 + 2 + 2, 81 + 128 + 2 + 2, 81, None ][intLevel + 1]
151
+ intCurrent = [ None, None, 81 + 32 + 2 + 2, 81 + 64 + 2 + 2, 81 + 96 + 2 + 2, 81 + 128 + 2 + 2, 81, None ][intLevel + 0]
152
+
153
+ if intLevel < 6: self.netUpflow = torch.nn.ConvTranspose2d(in_channels=2, out_channels=2, kernel_size=4, stride=2, padding=1)
154
+ if intLevel < 6: self.netUpfeat = torch.nn.ConvTranspose2d(in_channels=intPrevious + 128 + 128 + 96 + 64 + 32, out_channels=2, kernel_size=4, stride=2, padding=1)
155
+ if intLevel < 6: self.fltBackwarp = [ None, None, None, 5.0, 2.5, 1.25, 0.625, None ][intLevel + 1]
156
+
157
+ self.netOne = torch.nn.Sequential(
158
+ torch.nn.Conv2d(in_channels=intCurrent, out_channels=128, kernel_size=3, stride=1, padding=1),
159
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
160
+ )
161
+
162
+ self.netTwo = torch.nn.Sequential(
163
+ torch.nn.Conv2d(in_channels=intCurrent + 128, out_channels=128, kernel_size=3, stride=1, padding=1),
164
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
165
+ )
166
+
167
+ self.netThr = torch.nn.Sequential(
168
+ torch.nn.Conv2d(in_channels=intCurrent + 128 + 128, out_channels=96, kernel_size=3, stride=1, padding=1),
169
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
170
+ )
171
+
172
+ self.netFou = torch.nn.Sequential(
173
+ torch.nn.Conv2d(in_channels=intCurrent + 128 + 128 + 96, out_channels=64, kernel_size=3, stride=1, padding=1),
174
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
175
+ )
176
+
177
+ self.netFiv = torch.nn.Sequential(
178
+ torch.nn.Conv2d(in_channels=intCurrent + 128 + 128 + 96 + 64, out_channels=32, kernel_size=3, stride=1, padding=1),
179
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1)
180
+ )
181
+
182
+ self.netSix = torch.nn.Sequential(
183
+ torch.nn.Conv2d(in_channels=intCurrent + 128 + 128 + 96 + 64 + 32, out_channels=2, kernel_size=3, stride=1, padding=1)
184
+ )
185
+ # end
186
+
187
+ def forward(self, tenFirst, tenSecond, objPrevious):
188
+ tenFlow = None
189
+ tenFeat = None
190
+
191
+ if objPrevious is None:
192
+ tenFlow = None
193
+ tenFeat = None
194
+
195
+ tenVolume = torch.nn.functional.leaky_relu(input=correlation.FunctionCorrelation(tenFirst=tenFirst, tenSecond=tenSecond), negative_slope=0.1, inplace=False)
196
+
197
+ tenFeat = torch.cat([ tenVolume ], 1)
198
+
199
+ elif objPrevious is not None:
200
+ tenFlow = self.netUpflow(objPrevious['tenFlow'])
201
+ tenFeat = self.netUpfeat(objPrevious['tenFeat'])
202
+
203
+ tenVolume = torch.nn.functional.leaky_relu(input=correlation.FunctionCorrelation(tenFirst=tenFirst, tenSecond=backwarp(tenInput=tenSecond, tenFlow=tenFlow * self.fltBackwarp)), negative_slope=0.1, inplace=False)
204
+
205
+ tenFeat = torch.cat([ tenVolume, tenFirst, tenFlow, tenFeat ], 1)
206
+
207
+ # end
208
+
209
+ tenFeat = torch.cat([ self.netOne(tenFeat), tenFeat ], 1)
210
+ tenFeat = torch.cat([ self.netTwo(tenFeat), tenFeat ], 1)
211
+ tenFeat = torch.cat([ self.netThr(tenFeat), tenFeat ], 1)
212
+ tenFeat = torch.cat([ self.netFou(tenFeat), tenFeat ], 1)
213
+ tenFeat = torch.cat([ self.netFiv(tenFeat), tenFeat ], 1)
214
+
215
+ tenFlow = self.netSix(tenFeat)
216
+
217
+ return {
218
+ 'tenFlow': tenFlow,
219
+ 'tenFeat': tenFeat
220
+ }
221
+ # end
222
+ # end
223
+
224
+ class Refiner(torch.nn.Module):
225
+ def __init__(self):
226
+ super(Refiner, self).__init__()
227
+
228
+ self.netMain = torch.nn.Sequential(
229
+ torch.nn.Conv2d(in_channels=81 + 32 + 2 + 2 + 128 + 128 + 96 + 64 + 32, out_channels=128, kernel_size=3, stride=1, padding=1, dilation=1),
230
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
231
+ torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=2, dilation=2),
232
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
233
+ torch.nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=4, dilation=4),
234
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
235
+ torch.nn.Conv2d(in_channels=128, out_channels=96, kernel_size=3, stride=1, padding=8, dilation=8),
236
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
237
+ torch.nn.Conv2d(in_channels=96, out_channels=64, kernel_size=3, stride=1, padding=16, dilation=16),
238
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
239
+ torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1, dilation=1),
240
+ torch.nn.LeakyReLU(inplace=False, negative_slope=0.1),
241
+ torch.nn.Conv2d(in_channels=32, out_channels=2, kernel_size=3, stride=1, padding=1, dilation=1)
242
+ )
243
+ # end
244
+
245
+ def forward(self, tenInput):
246
+ return self.netMain(tenInput)
247
+ # end
248
+ # end
249
+
250
+ self.netExtractor = Extractor()
251
+
252
+ self.netTwo = Decoder(2)
253
+ self.netThr = Decoder(3)
254
+ self.netFou = Decoder(4)
255
+ self.netFiv = Decoder(5)
256
+ self.netSix = Decoder(6)
257
+
258
+ self.netRefiner = Refiner()
259
+
260
+ self.load_state_dict({ strKey.replace('module', 'net'): tenWeight for strKey, tenWeight in torch.hub.load_state_dict_from_url(url='http://content.sniklaus.com/github/pytorch-pwc/network-' + 'default' + '.pytorch').items() })
261
+ # end
262
+
263
+ def forward(self, tenFirst, tenSecond):
264
+ intWidth = tenFirst.shape[3]
265
+ intHeight = tenFirst.shape[2]
266
+
267
+ intPreprocessedWidth = int(math.floor(math.ceil(intWidth / 64.0) * 64.0))
268
+ intPreprocessedHeight = int(math.floor(math.ceil(intHeight / 64.0) * 64.0))
269
+
270
+ tenPreprocessedFirst = torch.nn.functional.interpolate(input=tenFirst, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)
271
+ tenPreprocessedSecond = torch.nn.functional.interpolate(input=tenSecond, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)
272
+
273
+ tenFirst = self.netExtractor(tenPreprocessedFirst)
274
+ tenSecond = self.netExtractor(tenPreprocessedSecond)
275
+
276
+
277
+ objEstimate = self.netSix(tenFirst[-1], tenSecond[-1], None)
278
+ objEstimate = self.netFiv(tenFirst[-2], tenSecond[-2], objEstimate)
279
+ objEstimate = self.netFou(tenFirst[-3], tenSecond[-3], objEstimate)
280
+ objEstimate = self.netThr(tenFirst[-4], tenSecond[-4], objEstimate)
281
+ objEstimate = self.netTwo(tenFirst[-5], tenSecond[-5], objEstimate)
282
+
283
+ tenFlow = objEstimate['tenFlow'] + self.netRefiner(objEstimate['tenFeat'])
284
+ tenFlow = 20.0 * torch.nn.functional.interpolate(input=tenFlow, size=(intHeight, intWidth), mode='bilinear', align_corners=False)
285
+ tenFlow[:, 0, :, :] *= float(intWidth) / float(intPreprocessedWidth)
286
+ tenFlow[:, 1, :, :] *= float(intHeight) / float(intPreprocessedHeight)
287
+
288
+ return tenFlow
289
+ # end
290
+ # end
291
+
292
+ netNetwork = None
293
+
294
+ ##########################################################
295
+
296
+ def estimate(tenFirst, tenSecond):
297
+ global netNetwork
298
+
299
+ if netNetwork is None:
300
+ netNetwork = Network().cuda().eval()
301
+ # end
302
+
303
+ assert(tenFirst.shape[1] == tenSecond.shape[1])
304
+ assert(tenFirst.shape[2] == tenSecond.shape[2])
305
+
306
+ intWidth = tenFirst.shape[2]
307
+ intHeight = tenFirst.shape[1]
308
+
309
+ assert(intWidth == 1024) # remember that there is no guarantee for correctness, comment this line out if you acknowledge this and want to continue
310
+ assert(intHeight == 436) # remember that there is no guarantee for correctness, comment this line out if you acknowledge this and want to continue
311
+
312
+ tenPreprocessedFirst = tenFirst.cuda().view(1, 3, intHeight, intWidth)
313
+ tenPreprocessedSecond = tenSecond.cuda().view(1, 3, intHeight, intWidth)
314
+
315
+ intPreprocessedWidth = int(math.floor(math.ceil(intWidth / 64.0) * 64.0))
316
+ intPreprocessedHeight = int(math.floor(math.ceil(intHeight / 64.0) * 64.0))
317
+
318
+ tenPreprocessedFirst = torch.nn.functional.interpolate(input=tenPreprocessedFirst, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)
319
+ tenPreprocessedSecond = torch.nn.functional.interpolate(input=tenPreprocessedSecond, size=(intPreprocessedHeight, intPreprocessedWidth), mode='bilinear', align_corners=False)
320
+
321
+ tenFlow = 20.0 * torch.nn.functional.interpolate(input=netNetwork(tenPreprocessedFirst, tenPreprocessedSecond), size=(intHeight, intWidth), mode='bilinear', align_corners=False)
322
+
323
+ tenFlow[:, 0, :, :] *= float(intWidth) / float(intPreprocessedWidth)
324
+ tenFlow[:, 1, :, :] *= float(intHeight) / float(intPreprocessedHeight)
325
+
326
+ return tenFlow[0, :, :, :].cpu()
327
+ # end
328
+
329
+ ##########################################################
330
+
331
+ # if __name__ == '__main__':
332
+ # tenFirst = torch.FloatTensor(numpy.ascontiguousarray(numpy.array(PIL.Image.open(arguments_strFirst))[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32) * (1.0 / 255.0)))
333
+ # tenSecond = torch.FloatTensor(numpy.ascontiguousarray(numpy.array(PIL.Image.open(arguments_strSecond))[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32) * (1.0 / 255.0)))
334
+
335
+ # tenOutput = estimate(tenFirst, tenSecond)
336
+
337
+ # objOutput = open(arguments_strOut, 'wb')
338
+
339
+ # numpy.array([ 80, 73, 69, 72 ], numpy.uint8).tofile(objOutput)
340
+ # numpy.array([ tenOutput.shape[2], tenOutput.shape[1] ], numpy.int32).tofile(objOutput)
341
+ # numpy.array(tenOutput.numpy().transpose(1, 2, 0), numpy.float32).tofile(objOutput)
342
+
343
+ # objOutput.close()
344
+ # end
causalvideovae/eval/flolpips/utils.py ADDED
@@ -0,0 +1,95 @@
1
+ import numpy as np
2
+ import cv2
3
+ import torch
4
+
5
+
6
+ def normalize_tensor(in_feat,eps=1e-10):
7
+ norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True))
8
+ return in_feat/(norm_factor+eps)
9
+
10
+ def l2(p0, p1, range=255.):
11
+ return .5*np.mean((p0 / range - p1 / range)**2)
12
+
13
+ def dssim(p0, p1, range=255.):
14
+ from skimage.measure import compare_ssim
15
+ return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2.
16
+
17
+ def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
18
+ image_numpy = image_tensor[0].cpu().float().numpy()
19
+ image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
20
+ return image_numpy.astype(imtype)
21
+
22
+ def tensor2np(tensor_obj):
23
+ # change dimension of a tensor object into a numpy array
24
+ return tensor_obj[0].cpu().float().numpy().transpose((1,2,0))
25
+
26
+ def np2tensor(np_obj):
27
+ # change dimension of a numpy array into a tensor
28
+ return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
29
+
30
+ def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):
31
+ # image tensor to lab tensor
32
+ from skimage import color
33
+
34
+ img = tensor2im(image_tensor)
35
+ img_lab = color.rgb2lab(img)
36
+ if(mc_only):
37
+ img_lab[:,:,0] = img_lab[:,:,0]-50
38
+ if(to_norm and not mc_only):
39
+ img_lab[:,:,0] = img_lab[:,:,0]-50
40
+ img_lab = img_lab/100.
41
+
42
+ return np2tensor(img_lab)
43
+
44
+ def read_frame_yuv2rgb(stream, width, height, iFrame, bit_depth, pix_fmt='420'):
45
+ if pix_fmt == '420':
46
+ multiplier = 1
47
+ uv_factor = 2
48
+ elif pix_fmt == '444':
49
+ multiplier = 2
50
+ uv_factor = 1
51
+ else:
52
+ print('Pixel format {} is not supported'.format(pix_fmt))
53
+ return
54
+
55
+ if bit_depth == 8:
56
+ datatype = np.uint8
57
+ stream.seek(int(iFrame*1.5*width*height*multiplier)) # seek offset must be an integer; an 8-bit 4:2:0 frame is 1.5*width*height bytes
58
+ Y = np.fromfile(stream, dtype=datatype, count=width*height).reshape((height, width))
59
+
60
+ # read chroma samples and upsample since original is 4:2:0 sampling
61
+ U = np.fromfile(stream, dtype=datatype, count=(width//uv_factor)*(height//uv_factor)).\
62
+ reshape((height//uv_factor, width//uv_factor))
63
+ V = np.fromfile(stream, dtype=datatype, count=(width//uv_factor)*(height//uv_factor)).\
64
+ reshape((height//uv_factor, width//uv_factor))
65
+
66
+ else:
67
+ datatype = np.uint16
68
+ stream.seek(iFrame*3*width*height*multiplier)
69
+ Y = np.fromfile(stream, dtype=datatype, count=width*height).reshape((height, width))
70
+
71
+ U = np.fromfile(stream, dtype=datatype, count=(width//uv_factor)*(height//uv_factor)).\
72
+ reshape((height//uv_factor, width//uv_factor))
73
+ V = np.fromfile(stream, dtype=datatype, count=(width//uv_factor)*(height//uv_factor)).\
74
+ reshape((height//uv_factor, width//uv_factor))
75
+
76
+ if pix_fmt == '420':
77
+ yuv = np.empty((height*3//2, width), dtype=datatype)
78
+ yuv[0:height,:] = Y
79
+
80
+ yuv[height:height+height//4,:] = U.reshape(-1, width)
81
+ yuv[height+height//4:,:] = V.reshape(-1, width)
82
+
83
+ if bit_depth != 8:
84
+ yuv = (yuv/(2**bit_depth-1)*255).astype(np.uint8)
85
+
86
+ #convert to rgb
87
+ rgb = cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB_I420)
88
+
89
+ else:
90
+ yvu = np.stack([Y,V,U],axis=2)
91
+ if bit_depth != 8:
92
+ yvu = (yvu/(2**bit_depth-1)*255).astype(np.uint8)
93
+ rgb = cv2.cvtColor(yvu, cv2.COLOR_YCrCb2RGB)
94
+
95
+ return rgb
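A minimal sketch of reading the first frame from a raw 8-bit 4:2:0 YUV file with the helper above; the file name and resolution are placeholders, and the module path is assumed:

    from causalvideovae.eval.flolpips.utils import read_frame_yuv2rgb  # assumed import path

    with open("clip_1920x1080.yuv", "rb") as stream:  # hypothetical raw YUV420p 8-bit file
        rgb = read_frame_yuv2rgb(stream, width=1920, height=1080,
                                 iFrame=0, bit_depth=8, pix_fmt='420')
    print(rgb.shape)  # (1080, 1920, 3), uint8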
causalvideovae/eval/fvd/styleganv/fvd.py ADDED
@@ -0,0 +1,90 @@
1
+ import torch
2
+ import os
3
+ import math
4
+ import torch.nn.functional as F
5
+
6
+ # https://github.com/universome/fvd-comparison
7
+
8
+
9
+ def load_i3d_pretrained(device=torch.device('cpu')):
10
+ i3D_WEIGHTS_URL = "https://www.dropbox.com/s/ge9e5ujwgetktms/i3d_torchscript.pt"
11
+ filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'i3d_torchscript.pt')
12
+ print(filepath)
13
+ if not os.path.exists(filepath):
14
+ print(f"preparing for download {i3D_WEIGHTS_URL}, you can download it by yourself.")
15
+ os.system(f"wget {i3D_WEIGHTS_URL} -O {filepath}")
16
+ i3d = torch.jit.load(filepath).eval().to(device)
17
+ i3d = torch.nn.DataParallel(i3d)
18
+ return i3d
19
+
20
+
21
+ def get_feats(videos, detector, device, bs=10):
22
+ # videos : torch.tensor BCTHW [0, 1]
23
+ detector_kwargs = dict(rescale=False, resize=False, return_features=True) # Return raw features before the softmax layer.
24
+ feats = np.empty((0, 400))
25
+ with torch.no_grad():
26
+ for i in range((len(videos)-1)//bs + 1):
27
+ feats = np.vstack([feats, detector(torch.stack([preprocess_single(video) for video in videos[i*bs:(i+1)*bs]]).to(device), **detector_kwargs).detach().cpu().numpy()])
28
+ return feats
29
+
30
+
31
+ def get_fvd_feats(videos, i3d, device, bs=10):
32
+ # videos in [0, 1] as torch tensor BCTHW
33
+ # videos = [preprocess_single(video) for video in videos]
34
+ embeddings = get_feats(videos, i3d, device, bs)
35
+ return embeddings
36
+
37
+
38
+ def preprocess_single(video, resolution=224, sequence_length=None):
39
+ # video: CTHW, [0, 1]
40
+ c, t, h, w = video.shape
41
+
42
+ # temporal crop
43
+ if sequence_length is not None:
44
+ assert sequence_length <= t
45
+ video = video[:, :sequence_length]
46
+
47
+ # scale shorter side to resolution
48
+ scale = resolution / min(h, w)
49
+ if h < w:
50
+ target_size = (resolution, math.ceil(w * scale))
51
+ else:
52
+ target_size = (math.ceil(h * scale), resolution)
53
+ video = F.interpolate(video, size=target_size, mode='bilinear', align_corners=False)
54
+
55
+ # center crop
56
+ c, t, h, w = video.shape
57
+ w_start = (w - resolution) // 2
58
+ h_start = (h - resolution) // 2
59
+ video = video[:, :, h_start:h_start + resolution, w_start:w_start + resolution]
60
+
61
+ # [0, 1] -> [-1, 1]
62
+ video = (video - 0.5) * 2
63
+
64
+ return video.contiguous()
65
+
66
+
67
+ """
68
+ Copy-pasted from https://github.com/cvpr2022-stylegan-v/stylegan-v/blob/main/src/metrics/frechet_video_distance.py
69
+ """
70
+ from typing import Tuple
71
+ from scipy.linalg import sqrtm
72
+ import numpy as np
73
+
74
+
75
+ def compute_stats(feats: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
76
+ mu = feats.mean(axis=0) # [d]
77
+ sigma = np.cov(feats, rowvar=False) # [d, d]
78
+ return mu, sigma
79
+
80
+
81
+ def frechet_distance(feats_fake: np.ndarray, feats_real: np.ndarray) -> float:
82
+ mu_gen, sigma_gen = compute_stats(feats_fake)
83
+ mu_real, sigma_real = compute_stats(feats_real)
84
+ m = np.square(mu_gen - mu_real).sum()
85
+ if feats_fake.shape[0]>1:
86
+ s, _ = sqrtm(np.dot(sigma_gen, sigma_real), disp=False) # pylint: disable=no-member
87
+ fid = np.real(m + np.trace(sigma_gen + sigma_real - s * 2))
88
+ else:
89
+ fid = np.real(m)
90
+ return float(fid)
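A quick check of `frechet_distance` above: identical feature statistics should give a distance near zero, and shifting every feature dimension by 1 adds roughly the squared mean difference. A sketch assuming this module is importable as `fvd.styleganv.fvd`, mirroring the import style used elsewhere in this repo:

    import numpy as np
    from fvd.styleganv.fvd import frechet_distance  # assumed import path

    rng = np.random.default_rng(0)
    feats = rng.standard_normal((1000, 16))
    print(frechet_distance(feats, feats))        # ~0.0
    print(frechet_distance(feats, feats + 1.0))  # ~16.0 (mean shift of 1 in each of 16 dimensions)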
causalvideovae/eval/fvd/videogpt/fvd.py ADDED
@@ -0,0 +1,137 @@
1
+ import torch
2
+ import os
3
+ import math
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ import einops
7
+
8
+ def load_i3d_pretrained(device=torch.device('cpu')):
9
+ i3D_WEIGHTS_URL = "https://onedrive.live.com/download?cid=78EEF3EB6AE7DBCB&resid=78EEF3EB6AE7DBCB%21199&authkey=AApKdFHPXzWLNyI"
10
+ filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'i3d_pretrained_400.pt')
11
+ print(filepath)
12
+ if not os.path.exists(filepath):
13
+ print(f"preparing for download {i3D_WEIGHTS_URL}, you can download it by yourself.")
14
+ os.system(f"wget {i3D_WEIGHTS_URL} -O {filepath}")
15
+ from .pytorch_i3d import InceptionI3d
16
+ i3d = InceptionI3d(400, in_channels=3).eval().to(device)
17
+ i3d.load_state_dict(torch.load(filepath, map_location=device))
18
+ i3d = torch.nn.DataParallel(i3d)
19
+ return i3d
20
+
21
+ def preprocess_single(video, resolution, sequence_length=None):
22
+ # video: THWC, {0, ..., 255}
23
+ video = video.permute(0, 3, 1, 2).float() / 255. # TCHW
24
+ t, c, h, w = video.shape
25
+
26
+ # temporal crop
27
+ if sequence_length is not None:
28
+ assert sequence_length <= t
29
+ video = video[:sequence_length]
30
+
31
+ # scale shorter side to resolution
32
+ scale = resolution / min(h, w)
33
+ if h < w:
34
+ target_size = (resolution, math.ceil(w * scale))
35
+ else:
36
+ target_size = (math.ceil(h * scale), resolution)
37
+ video = F.interpolate(video, size=target_size, mode='bilinear',
38
+ align_corners=False)
39
+
40
+ # center crop
41
+ t, c, h, w = video.shape
42
+ w_start = (w - resolution) // 2
43
+ h_start = (h - resolution) // 2
44
+ video = video[:, :, h_start:h_start + resolution, w_start:w_start + resolution]
45
+ video = video.permute(1, 0, 2, 3).contiguous() # CTHW
46
+
47
+ video -= 0.5
48
+
49
+ return video
50
+
51
+ def preprocess(videos, target_resolution=224):
52
+ # input: videos in [0, 1], shape [b, c, t, h, w], as torch.float
53
+ # -> videos in {0, ..., 255} [b t h w c] as np.uint8 array
54
+ videos = einops.rearrange(videos, 'b c t h w -> b t h w c')
55
+ videos = (videos*255).numpy().astype(np.uint8)
56
+
57
+ b, t, h, w, c = videos.shape
58
+ videos = torch.from_numpy(videos)
59
+ videos = torch.stack([preprocess_single(video, target_resolution) for video in videos])
60
+ return videos * 2 # [-0.5, 0.5] -> [-1, 1]
61
+
62
+ def get_fvd_logits(videos, i3d, device, bs=10):
63
+ videos = preprocess(videos)
64
+ embeddings = get_logits(i3d, videos, device, bs=10)
65
+ return embeddings
66
+
67
+ # https://github.com/tensorflow/gan/blob/de4b8da3853058ea380a6152bd3bd454013bf619/tensorflow_gan/python/eval/classifier_metrics.py#L161
68
+ def _symmetric_matrix_square_root(mat, eps=1e-10):
69
+ u, s, v = torch.svd(mat)
70
+ si = torch.where(s < eps, s, torch.sqrt(s))
71
+ return torch.matmul(torch.matmul(u, torch.diag(si)), v.t())
72
+
73
+ # https://github.com/tensorflow/gan/blob/de4b8da3853058ea380a6152bd3bd454013bf619/tensorflow_gan/python/eval/classifier_metrics.py#L400
74
+ def trace_sqrt_product(sigma, sigma_v):
75
+ sqrt_sigma = _symmetric_matrix_square_root(sigma)
76
+ sqrt_a_sigmav_a = torch.matmul(sqrt_sigma, torch.matmul(sigma_v, sqrt_sigma))
77
+ return torch.trace(_symmetric_matrix_square_root(sqrt_a_sigmav_a))
78
+
79
+ # https://discuss.pytorch.org/t/covariance-and-gradient-support/16217/2
80
+ def cov(m, rowvar=False):
81
+ '''Estimate a covariance matrix given data.
82
+
83
+ Covariance indicates the level to which two variables vary together.
84
+ If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
85
+ then the covariance matrix element `C_{ij}` is the covariance of
86
+ `x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.
87
+
88
+ Args:
89
+ m: A 1-D or 2-D array containing multiple variables and observations.
90
+ Each row of `m` represents a variable, and each column a single
91
+ observation of all those variables.
92
+ rowvar: If `rowvar` is True, then each row represents a
93
+ variable, with observations in the columns. Otherwise, the
94
+ relationship is transposed: each column represents a variable,
95
+ while the rows contain observations.
96
+
97
+ Returns:
98
+ The covariance matrix of the variables.
99
+ '''
100
+ if m.dim() > 2:
101
+ raise ValueError('m has more than 2 dimensions')
102
+ if m.dim() < 2:
103
+ m = m.view(1, -1)
104
+ if not rowvar and m.size(0) != 1:
105
+ m = m.t()
106
+
107
+ fact = 1.0 / (m.size(1) - 1) # unbiased estimate
108
+ m -= torch.mean(m, dim=1, keepdim=True)
109
+ mt = m.t() # if complex: mt = m.t().conj()
110
+ return fact * m.matmul(mt).squeeze()
111
+
112
+
113
+ def frechet_distance(x1, x2):
114
+ x1 = x1.flatten(start_dim=1)
115
+ x2 = x2.flatten(start_dim=1)
116
+ m, m_w = x1.mean(dim=0), x2.mean(dim=0)
117
+ sigma, sigma_w = cov(x1, rowvar=False), cov(x2, rowvar=False)
118
+ mean = torch.sum((m - m_w) ** 2)
119
+ if x1.shape[0]>1:
120
+ sqrt_trace_component = trace_sqrt_product(sigma, sigma_w)
121
+ trace = torch.trace(sigma + sigma_w) - 2.0 * sqrt_trace_component
122
+ fd = trace + mean
123
+ else:
124
+ fd = np.real(mean)
125
+ return float(fd)
126
+
127
+
128
+ def get_logits(i3d, videos, device, bs=10):
129
+ # assert videos.shape[0] % 16 == 0
130
+ with torch.no_grad():
131
+ logits = []
132
+ for i in range(0, videos.shape[0], bs):
133
+ batch = videos[i:i + bs].to(device)
134
+ # logits.append(i3d.module.extract_features(batch)) # wrong
135
+ logits.append(i3d(batch)) # right
136
+ logits = torch.cat(logits, dim=0)
137
+ return logits
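A minimal end-to-end sketch tying the helpers above together for a single pair of video batches, assuming a CUDA device and the pretrained I3D weights; inputs are float tensors in [0, 1] with shape (B, C, T, H, W), which is what `preprocess` expects:

    import torch
    from fvd.videogpt.fvd import load_i3d_pretrained, get_fvd_logits, frechet_distance  # assumed import path

    device = torch.device("cuda")
    i3d = load_i3d_pretrained(device)
    real = torch.rand(4, 3, 16, 64, 64)   # placeholder videos, (B, C, T, H, W) in [0, 1]
    fake = torch.rand(4, 3, 16, 64, 64)
    feats_real = get_fvd_logits(real, i3d, device)
    feats_fake = get_fvd_logits(fake, i3d, device)
    print(frechet_distance(feats_fake, feats_real))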
causalvideovae/eval/script/cal_lpips.sh ADDED
@@ -0,0 +1,8 @@
1
+ python eval_common_metric.py \
2
+ --real_video_dir path/to/imageA\
3
+ --generated_video_dir path/to/imageB \
4
+ --batch_size 10 \
5
+ --num_frames 20 \
6
+ --crop_size 64 \
7
+ --device 'cuda' \
8
+ --metric 'lpips'
causalvideovae/model/losses/perceptual_loss.py ADDED
@@ -0,0 +1,522 @@
1
+ import torch
2
+ from torch import nn
3
+ import torch.nn.functional as F
4
+ from .lpips import LPIPS
5
+ from einops import rearrange
6
+ from .discriminator import NLayerDiscriminator, weights_init, NLayerDiscriminator3D
7
+
8
+
9
+ def hinge_d_loss(logits_real, logits_fake):
10
+ loss_real = torch.mean(F.relu(1.0 - logits_real))
11
+ loss_fake = torch.mean(F.relu(1.0 + logits_fake))
12
+ d_loss = 0.5 * (loss_real + loss_fake)
13
+ return d_loss
14
+
15
+
16
+ def vanilla_d_loss(logits_real, logits_fake):
17
+ d_loss = 0.5 * (
18
+ torch.mean(torch.nn.functional.softplus(-logits_real))
19
+ + torch.mean(torch.nn.functional.softplus(logits_fake))
20
+ )
21
+ return d_loss
22
+
23
+
24
+ def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights):
25
+ assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0]
26
+ loss_real = torch.mean(F.relu(1.0 - logits_real), dim=[1, 2, 3])
27
+ loss_fake = torch.mean(F.relu(1.0 + logits_fake), dim=[1, 2, 3])
28
+ loss_real = (weights * loss_real).sum() / weights.sum()
29
+ loss_fake = (weights * loss_fake).sum() / weights.sum()
30
+ d_loss = 0.5 * (loss_real + loss_fake)
31
+ return d_loss
32
+
33
+
34
+ def adopt_weight(weight, global_step, threshold=0, value=0.0):
35
+ if global_step < threshold:
36
+ weight = value
37
+ return weight
38
+
39
+
40
+ def measure_perplexity(predicted_indices, n_embed):
41
+ # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py
42
+ # eval cluster perplexity. when perplexity == num_embeddings then all clusters are used exactly equally
43
+ encodings = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed)
44
+ avg_probs = encodings.mean(0)
45
+ perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp()
46
+ cluster_use = torch.sum(avg_probs > 0)
47
+ return perplexity, cluster_use
48
+
49
+
50
+ def l1(x, y):
51
+ return torch.abs(x - y)
52
+
53
+
54
+ def l2(x, y):
55
+ return torch.pow((x - y), 2)
56
+
57
+ def l1_10(x, y):
58
+ return torch.pow((x - y), 0.1)
59
+
60
+
61
+ class LPIPSWithDiscriminator(nn.Module):
62
+ def __init__(
63
+ self,
64
+ disc_start,
65
+ logvar_init=0.0,
66
+ kl_weight=1.0,
67
+ pixelloss_weight=1.0,
68
+ perceptual_weight=1.0,
69
+ # --- Discriminator Loss ---
70
+ disc_num_layers=3,
71
+ disc_in_channels=3,
72
+ disc_factor=1.0,
73
+ disc_weight=1.0,
74
+ use_actnorm=False,
75
+ disc_conditional=False,
76
+ disc_loss="hinge",
77
+ loss_type: str = "l1"
78
+ ):
79
+
80
+ super().__init__()
81
+ assert disc_loss in ["hinge", "vanilla"]
82
+ self.kl_weight = kl_weight
83
+ self.pixel_weight = pixelloss_weight
84
+ self.perceptual_loss = LPIPS().eval()
85
+ self.perceptual_weight = perceptual_weight
86
+ self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
87
+
88
+ self.discriminator = NLayerDiscriminator(
89
+ input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm
90
+ ).apply(weights_init)
91
+ self.discriminator_iter_start = disc_start
92
+ self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
93
+ self.disc_factor = disc_factor
94
+ self.discriminator_weight = disc_weight
95
+ self.disc_conditional = disc_conditional
96
+ self.loss_func = l1 if loss_type == "l1" else l2
97
+
98
+ def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
99
+ layer = last_layer if last_layer is not None else self.last_layer[0]
100
+
101
+ nll_grads = torch.autograd.grad(nll_loss, layer, retain_graph=True)[0]
102
+ g_grads = torch.autograd.grad(g_loss, layer, retain_graph=True)[0]
103
+
104
+ d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
105
+ d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
106
+ d_weight = d_weight * self.discriminator_weight
107
+ return d_weight
108
+
109
+ def forward(
110
+ self,
111
+ inputs,
112
+ reconstructions,
113
+ posteriors,
114
+ optimizer_idx,
115
+ global_step,
116
+ split="train",
117
+ weights=None,
118
+ last_layer=None,
119
+ cond=None,
120
+ ):
121
+ # GAN Part
122
+ inputs = rearrange(inputs, "b c t h w -> (b t) c h w").contiguous()
123
+ reconstructions = rearrange(
124
+ reconstructions, "b c t h w -> (b t) c h w"
125
+ ).contiguous()
126
+ if optimizer_idx == 0:
127
+ rec_loss = self.loss_func(inputs, reconstructions)
128
+ if self.perceptual_weight > 0:
129
+ p_loss = self.perceptual_loss(inputs, reconstructions)
130
+ rec_loss = rec_loss + self.perceptual_weight * p_loss
131
+ nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
132
+ weighted_nll_loss = nll_loss
133
+ if weights is not None:
134
+ weighted_nll_loss = weights * nll_loss
135
+ weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
136
+ nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
137
+ kl_loss = posteriors.kl()
138
+ kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
139
+
140
+ logits_fake = self.discriminator(reconstructions)
141
+ g_loss = -torch.mean(logits_fake)
142
+ if global_step >= self.discriminator_iter_start:
143
+ if self.disc_factor > 0.0:
144
+ d_weight = self.calculate_adaptive_weight(
145
+ nll_loss, g_loss, last_layer=last_layer
146
+ )
147
+ else:
148
+ d_weight = torch.tensor(1.0)
149
+ else:
150
+ d_weight = torch.tensor(0.0)
151
+ g_loss = torch.tensor(0.0, requires_grad=True)
152
+
153
+ disc_factor = adopt_weight(
154
+ self.disc_factor, global_step, threshold=self.discriminator_iter_start
155
+ )
156
+ loss = (
157
+ weighted_nll_loss
158
+ + self.kl_weight * kl_loss
159
+ + d_weight * disc_factor * g_loss
160
+ )
161
+ log = {
162
+ "{}/total_loss".format(split): loss.clone().detach().mean(),
163
+ "{}/logvar".format(split): self.logvar.detach(),
164
+ "{}/kl_loss".format(split): kl_loss.detach().mean(),
165
+ "{}/nll_loss".format(split): nll_loss.detach().mean(),
166
+ "{}/rec_loss".format(split): rec_loss.detach().mean(),
167
+ "{}/d_weight".format(split): d_weight.detach(),
168
+ "{}/disc_factor".format(split): torch.tensor(disc_factor),
169
+ "{}/g_loss".format(split): g_loss.detach().mean(),
170
+ }
171
+ return loss, log
172
+
173
+ if optimizer_idx == 1:
174
+ if cond is None:
175
+ logits_real = self.discriminator(inputs.contiguous().detach())
176
+ logits_fake = self.discriminator(reconstructions.contiguous().detach())
177
+ else:
178
+ logits_real = self.discriminator(
179
+ torch.cat((inputs.contiguous().detach(), cond), dim=1)
180
+ )
181
+ logits_fake = self.discriminator(
182
+ torch.cat((reconstructions.contiguous().detach(), cond), dim=1)
183
+ )
184
+
185
+ disc_factor = adopt_weight(
186
+ self.disc_factor, global_step, threshold=self.discriminator_iter_start
187
+ )
188
+ d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
189
+
190
+ log = {
191
+ "{}/disc_loss".format(split): d_loss.clone().detach().mean(),
192
+ "{}/logits_real".format(split): logits_real.detach().mean(),
193
+ "{}/logits_fake".format(split): logits_fake.detach().mean(),
194
+ }
195
+ return d_loss, log
196
+
197
+
198
+ class LPIPSWithDiscriminator3D(nn.Module):
199
+ def __init__(
200
+ self,
201
+ disc_start,
202
+ logvar_init=0.0,
203
+ kl_weight=1.0,
204
+ pixelloss_weight=1.0,
205
+ perceptual_weight=1.0,
206
+ # --- Discriminator Loss ---
207
+ disc_num_layers=3,
208
+ disc_in_channels=3,
209
+ disc_factor=1.0,
210
+ disc_weight=1.0,
211
+ use_actnorm=False,
212
+ disc_conditional=False,
213
+ disc_loss="hinge",
214
+ learn_logvar: bool = False,
215
+ loss_type: str = "l1"
216
+ ):
217
+
218
+ super().__init__()
219
+ assert disc_loss in ["hinge", "vanilla"]
220
+ self.kl_weight = kl_weight
221
+ self.pixel_weight = pixelloss_weight
222
+ self.perceptual_loss = LPIPS().eval()
223
+ self.perceptual_weight = perceptual_weight
224
+ self.logvar = nn.Parameter(
225
+ torch.full((), logvar_init), requires_grad=learn_logvar
226
+ )
227
+ self.discriminator = NLayerDiscriminator3D(
228
+ input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm
229
+ ).apply(weights_init)
230
+ self.discriminator_iter_start = disc_start
231
+ self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
232
+ self.disc_factor = disc_factor
233
+ self.discriminator_weight = disc_weight
234
+ self.disc_conditional = disc_conditional
235
+ self.loss_func = l1 if loss_type == "l1" else l2
236
+
237
+ def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
238
+ layer = last_layer if last_layer is not None else self.last_layer[0]
239
+
240
+ nll_grads = torch.autograd.grad(nll_loss, layer, retain_graph=True)[0]
241
+ g_grads = torch.autograd.grad(g_loss, layer, retain_graph=True)[0]
242
+
243
+ d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
244
+ d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
245
+ d_weight = d_weight * self.discriminator_weight
246
+ return d_weight
247
+
248
+ def forward(
249
+ self,
250
+ inputs,
251
+ reconstructions,
252
+ posteriors,
253
+ optimizer_idx,
254
+ global_step,
255
+ split="train",
256
+ weights=None,
257
+ last_layer=None,
258
+ cond=None
259
+ ):
260
+
261
+ t = inputs.shape[2]
262
+ # GAN Part
263
+ if optimizer_idx == 0:
264
+ inputs = rearrange(inputs, "b c t h w -> (b t) c h w").contiguous()
265
+ reconstructions = rearrange(
266
+ reconstructions, "b c t h w -> (b t) c h w"
267
+ ).contiguous()
268
+ rec_loss = self.loss_func(inputs, reconstructions)
269
+ if self.perceptual_weight > 0:
270
+ p_loss = self.perceptual_loss(inputs, reconstructions)
271
+ rec_loss = rec_loss + self.perceptual_weight * p_loss
272
+ nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
273
+ weighted_nll_loss = nll_loss
274
+ if weights is not None:
275
+ weighted_nll_loss = weights * nll_loss
276
+ weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
277
+ nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
278
+ kl_loss = posteriors.kl()
279
+ kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
280
+
281
+ inputs = rearrange(inputs, "(b t) c h w -> b c t h w", t=t).contiguous()
282
+ reconstructions = rearrange(
283
+ reconstructions, "(b t) c h w -> b c t h w", t=t
284
+ ).contiguous()
285
+
286
+ logits_fake = self.discriminator(reconstructions)
287
+ g_loss = -torch.mean(logits_fake)
288
+ if global_step >= self.discriminator_iter_start:
289
+ if self.disc_factor > 0.0:
290
+ d_weight = self.calculate_adaptive_weight(
291
+ nll_loss, g_loss, last_layer=last_layer
292
+ )
293
+ else:
294
+ d_weight = torch.tensor(1.0)
295
+ else:
296
+ d_weight = torch.tensor(0.0)
297
+ g_loss = torch.tensor(0.0, requires_grad=True)
298
+
299
+ disc_factor = adopt_weight(
300
+ self.disc_factor, global_step, threshold=self.discriminator_iter_start
301
+ )
302
+ loss = (
303
+ weighted_nll_loss
304
+ + self.kl_weight * kl_loss
305
+ + d_weight * disc_factor * g_loss
306
+ )
307
+ log = {
308
+ "{}/total_loss".format(split): loss.clone().detach().mean(),
309
+ "{}/logvar".format(split): self.logvar.detach(),
310
+ "{}/kl_loss".format(split): kl_loss.detach().mean(),
311
+ "{}/nll_loss".format(split): nll_loss.detach().mean(),
312
+ "{}/rec_loss".format(split): rec_loss.detach().mean(),
313
+ "{}/d_weight".format(split): d_weight.detach(),
314
+ "{}/disc_factor".format(split): torch.tensor(disc_factor),
315
+ "{}/g_loss".format(split): g_loss.detach().mean(),
316
+ }
317
+ return loss, log
318
+ elif optimizer_idx == 1:
319
+ logits_real = self.discriminator(inputs.contiguous().detach())
320
+ logits_fake = self.discriminator(reconstructions.contiguous().detach())
321
+
322
+ disc_factor = adopt_weight(
323
+ self.disc_factor, global_step, threshold=self.discriminator_iter_start
324
+ )
325
+
326
+ d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
327
+
328
+ log = {
329
+ "{}/disc_loss".format(split): d_loss.clone().detach().mean(),
330
+ "{}/logits_real".format(split): logits_real.detach().mean(),
331
+ "{}/logits_fake".format(split): logits_fake.detach().mean(),
332
+ }
333
+ return d_loss, log
334
+
335
+
336
+ class LPIPSWithDiscriminator3Drefiner(nn.Module):
337
+ def __init__(
338
+ self,
339
+ disc_start,
340
+ logvar_init=0.0,
341
+ pixelloss_weight=1.0,
342
+ perceptual_weight=1.0,
343
+ # --- Discriminator Loss ---
344
+ disc_num_layers=3,
345
+ disc_in_channels=3,
346
+ disc_factor=1.0,
347
+ disc_weight=1.0,
348
+ use_actnorm=False,
349
+ disc_conditional=False,
350
+ disc_loss="hinge",
351
+ learn_logvar: bool = False,
352
+ loss_type: str = "l1"
353
+ ):
354
+
355
+ super().__init__()
356
+ assert disc_loss in ["hinge", "vanilla"]
357
+ self.pixel_weight = pixelloss_weight
358
+ self.perceptual_loss = LPIPS().eval()
359
+ self.perceptual_weight = perceptual_weight
360
+ self.logvar = nn.Parameter(
361
+ torch.full((), logvar_init), requires_grad=learn_logvar
362
+ )
363
+ self.discriminator = NLayerDiscriminator3D(
364
+ input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm
365
+ ).apply(weights_init)
366
+ self.discriminator_iter_start = disc_start
367
+ self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss
368
+ self.disc_factor = disc_factor
369
+ self.discriminator_weight = disc_weight
370
+ self.disc_conditional = disc_conditional
371
+ self.loss_func = l1 if loss_type == "l1" else l2
372
+
373
+ def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
374
+ layer = last_layer if last_layer is not None else self.last_layer[0]
375
+
376
+ nll_grads = torch.autograd.grad(nll_loss, layer, retain_graph=True)[0]
377
+ g_grads = torch.autograd.grad(g_loss, layer, retain_graph=True)[0]
378
+
379
+ d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
380
+ d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
381
+ d_weight = d_weight * self.discriminator_weight
382
+ return d_weight
383
+
384
+ def forward(
385
+ self,
386
+ inputs,
387
+ reconstructions,
388
+ optimizer_idx,
389
+ global_step,
390
+ split="train",
391
+ weights=None,
392
+ last_layer=None,
393
+ cond=None
394
+ ):
395
+
396
+ t = inputs.shape[2]
397
+ # GAN Part
398
+ if optimizer_idx == 0:
399
+ inputs = rearrange(inputs, "b c t h w -> (b t) c h w").contiguous()
400
+ reconstructions = rearrange(
401
+ reconstructions, "b c t h w -> (b t) c h w"
402
+ ).contiguous()
403
+ rec_loss = self.loss_func(inputs, reconstructions)
404
+ if self.perceptual_weight > 0:
405
+ p_loss = self.perceptual_loss(inputs, reconstructions)
406
+ rec_loss = rec_loss + self.perceptual_weight * p_loss
407
+ nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
408
+ weighted_nll_loss = nll_loss
409
+ if weights is not None:
410
+ weighted_nll_loss = weights * nll_loss
411
+ weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
412
+ nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
413
+
414
+ inputs = rearrange(inputs, "(b t) c h w -> b c t h w", t=t).contiguous()
415
+ reconstructions = rearrange(
416
+ reconstructions, "(b t) c h w -> b c t h w", t=t
417
+ ).contiguous()
418
+
419
+ logits_fake = self.discriminator(reconstructions)
420
+ g_loss = -torch.mean(logits_fake)
421
+ if global_step >= self.discriminator_iter_start:
422
+ if self.disc_factor > 0.0:
423
+ d_weight = self.calculate_adaptive_weight(
424
+ nll_loss, g_loss, last_layer=last_layer
425
+ )
426
+ else:
427
+ d_weight = torch.tensor(1.0)
428
+ else:
429
+ d_weight = torch.tensor(0.0)
430
+ g_loss = torch.tensor(0.0, requires_grad=True)
431
+
432
+ disc_factor = adopt_weight(
433
+ self.disc_factor, global_step, threshold=self.discriminator_iter_start
434
+ )
435
+ loss = (
436
+ weighted_nll_loss
437
+ + d_weight * disc_factor * g_loss
438
+ )
439
+ log = {
440
+ "{}/total_loss".format(split): loss.clone().detach().mean(),
441
+ "{}/logvar".format(split): self.logvar.detach(),
442
+ "{}/nll_loss".format(split): nll_loss.detach().mean(),
443
+ "{}/rec_loss".format(split): rec_loss.detach().mean(),
444
+ "{}/d_weight".format(split): d_weight.detach(),
445
+ "{}/disc_factor".format(split): torch.tensor(disc_factor),
446
+ "{}/g_loss".format(split): g_loss.detach().mean(),
447
+ }
448
+ return loss, log
449
+ elif optimizer_idx == 1:
450
+ logits_real = self.discriminator(inputs.contiguous().detach())
451
+ logits_fake = self.discriminator(reconstructions.contiguous().detach())
452
+
453
+ disc_factor = adopt_weight(
454
+ self.disc_factor, global_step, threshold=self.discriminator_iter_start
455
+ )
456
+
457
+ d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
458
+
459
+ log = {
460
+ "{}/disc_loss".format(split): d_loss.clone().detach().mean(),
461
+ "{}/logits_real".format(split): logits_real.detach().mean(),
462
+ "{}/logits_fake".format(split): logits_fake.detach().mean(),
463
+ }
464
+ return d_loss, log
465
+
466
+ class SimpleLPIPS(nn.Module):
467
+ def __init__(
468
+ self,
469
+ logvar_init=0.0,
470
+ kl_weight=1.0,
471
+ pixelloss_weight=1.0,
472
+ perceptual_weight=1.0,
473
+ disc_loss="hinge",
474
+ learn_logvar: bool = False,
475
+ **kwargs
476
+ ):
477
+
478
+ super().__init__()
479
+ assert disc_loss in ["hinge", "vanilla"]
480
+ self.kl_weight = kl_weight
481
+ self.pixel_weight = pixelloss_weight
482
+ self.perceptual_loss = LPIPS().eval()
483
+ self.perceptual_weight = perceptual_weight
484
+ self.logvar = nn.Parameter(
485
+ torch.full((), logvar_init), requires_grad=learn_logvar
486
+ )
487
+
488
+ def forward(
489
+ self,
490
+ inputs,
491
+ reconstructions,
492
+ posteriors,
493
+ split="train",
494
+ weights=None,
495
+ ):
496
+ inputs = rearrange(inputs, "b c t h w -> (b t) c h w").contiguous()
497
+ reconstructions = rearrange(
498
+ reconstructions, "b c t h w -> (b t) c h w"
499
+ ).contiguous()
500
+ rec_loss = torch.abs(inputs - reconstructions)
501
+ if self.perceptual_weight > 0:
502
+ p_loss = self.perceptual_loss(inputs, reconstructions)
503
+ rec_loss = rec_loss + self.perceptual_weight * p_loss
504
+ nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
505
+ weighted_nll_loss = nll_loss
506
+ if weights is not None:
507
+ weighted_nll_loss = weights * nll_loss
508
+ weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
509
+ nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
510
+ kl_loss = posteriors.kl()
511
+ kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
512
+ loss = weighted_nll_loss + self.kl_weight * kl_loss
513
+ log = {
514
+ "{}/total_loss".format(split): loss.clone().detach().mean(),
515
+ "{}/logvar".format(split): self.logvar.detach(),
516
+ "{}/kl_loss".format(split): kl_loss.detach().mean(),
517
+ "{}/nll_loss".format(split): nll_loss.detach().mean(),
518
+ "{}/rec_loss".format(split): rec_loss.detach().mean(),
519
+ }
520
+ if self.perceptual_weight > 0:
521
+ log.update({"{}/p_loss".format(split): p_loss.detach().mean()})
522
+ return loss, log
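
For orientation, a minimal sketch of the two-optimizer calling convention these loss modules expect (not part of this commit; the vae, opt_g and opt_d objects and the get_last_layer() accessor are illustrative assumptions):

# Illustrative sketch only; assumes a VAE whose forward returns (reconstruction, posterior)
# and exposes its final decoder weight via get_last_layer(). opt_g optimizes the VAE,
# opt_d optimizes loss_fn.discriminator.
loss_fn = LPIPSWithDiscriminator3D(disc_start=5000)

def training_step(vae, opt_g, opt_d, video, global_step):
    recon, posterior = vae(video)                          # video: (b, c, t, h, w)

    # optimizer_idx=0: reconstruction + KL + adaptively weighted adversarial term
    g_loss, g_log = loss_fn(video, recon, posterior, optimizer_idx=0,
                            global_step=global_step, last_layer=vae.get_last_layer())
    opt_g.zero_grad(); g_loss.backward(); opt_g.step()

    # optimizer_idx=1: hinge/vanilla discriminator loss on detached tensors
    d_loss, d_log = loss_fn(video, recon.detach(), posterior, optimizer_idx=1,
                            global_step=global_step)
    opt_d.zero_grad(); d_loss.backward(); opt_d.step()
    return {**g_log, **d_log}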
causalvideovae/model/modules/attention.py ADDED
@@ -0,0 +1,287 @@
1
+ import torch.nn as nn
2
+ from .normalize import Normalize
3
+ from .conv import CausalConv3d
4
+ import torch
5
+ import numpy as np
6
+ from einops import rearrange
7
+ from .block import Block
8
+ from .ops import video_to_image
9
+
10
+ class LinearAttention(Block):
11
+ def __init__(self, dim, heads=4, dim_head=32):
12
+ super().__init__()
13
+ self.heads = heads
14
+ hidden_dim = dim_head * heads
15
+ self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
16
+ self.to_out = nn.Conv2d(hidden_dim, dim, 1)
17
+
18
+ def forward(self, x):
19
+ b, c, h, w = x.shape
20
+ qkv = self.to_qkv(x)
21
+ q, k, v = rearrange(
22
+ qkv, "b (qkv heads c) h w -> qkv b heads c (h w)", heads=self.heads, qkv=3
23
+ )
24
+ k = k.softmax(dim=-1)
25
+ context = torch.einsum("bhdn,bhen->bhde", k, v)
26
+ out = torch.einsum("bhde,bhdn->bhen", context, q)
27
+ out = rearrange(
28
+ out, "b heads c (h w) -> b (heads c) h w", heads=self.heads, h=h, w=w
29
+ )
30
+ return self.to_out(out)
31
+
32
+
33
+ class LinAttnBlock(LinearAttention):
34
+ """to match AttnBlock usage"""
35
+
36
+ def __init__(self, in_channels):
37
+ super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
38
+
39
+
40
+ class AttnBlock3D(Block):
41
+ """Compatible with old versions, there are issues, use with caution."""
42
+ def __init__(self, in_channels):
43
+ super().__init__()
44
+ self.in_channels = in_channels
45
+
46
+ self.norm = Normalize(in_channels)
47
+ self.q = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1)
48
+ self.k = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1)
49
+ self.v = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1)
50
+ self.proj_out = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1)
51
+
52
+ def forward(self, x):
53
+ h_ = x
54
+ h_ = self.norm(h_)
55
+ q = self.q(h_)
56
+ k = self.k(h_)
57
+ v = self.v(h_)
58
+
59
+ # compute attention
60
+ b, c, t, h, w = q.shape
61
+ q = q.reshape(b * t, c, h * w)
62
+ q = q.permute(0, 2, 1) # b,hw,c
63
+ k = k.reshape(b * t, c, h * w) # b,c,hw
64
+ w_ = torch.bmm(q, k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
65
+ w_ = w_ * (int(c) ** (-0.5))
66
+ w_ = torch.nn.functional.softmax(w_, dim=2)
67
+
68
+ # attend to values
69
+ v = v.reshape(b * t, c, h * w)
70
+ w_ = w_.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q)
71
+ h_ = torch.bmm(v, w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
72
+ h_ = h_.reshape(b, c, t, h, w)
73
+
74
+ h_ = self.proj_out(h_)
75
+
76
+ return x + h_
77
+
78
+ class AttnBlock3DFixNorm(nn.Module):
79
+ """
80
+ Thanks to https://github.com/PKU-YuanGroup/Open-Sora-Plan/pull/172.
81
+ """
82
+ def __init__(self, in_channels):
83
+ super().__init__()
84
+ self.in_channels = in_channels
85
+
86
+ self.norm = Normalize(in_channels)
87
+ self.q = torch.nn.Conv3d(in_channels, in_channels, kernel_size=1, stride=1)
88
+ self.k = torch.nn.Conv3d(in_channels, in_channels, kernel_size=1, stride=1)
89
+ self.v = torch.nn.Conv3d(in_channels, in_channels, kernel_size=1, stride=1)
90
+ self.proj_out = torch.nn.Conv3d(in_channels, in_channels, kernel_size=1, stride=1)
91
+
92
+ def forward(self, x):
93
+ h_ = x
94
+ h_ = h_.permute(0, 2, 3, 4, 1)
95
+ h_ = self.norm(h_)
96
+ h_ = h_.permute(0, 4, 1, 2, 3)
97
+ q = self.q(h_)
98
+ k = self.k(h_)
99
+ v = self.v(h_)
100
+
101
+ # compute attention
102
+ # q: (b c t h w) -> (b t c h w) -> (b*t c h*w) -> (b*t h*w c)
103
+ b, c, t, h, w = q.shape
104
+ q = q.permute(0, 2, 1, 3, 4)
105
+ q = q.reshape(b * t, c, h * w)
106
+ q = q.permute(0, 2, 1)
107
+
108
+ # k: (b c t h w) -> (b t c h w) -> (b*t c h*w)
109
+ k = k.permute(0, 2, 1, 3, 4)
110
+ k = k.reshape(b * t, c, h * w)
111
+
112
+ # w: (b*t hw hw)
113
+ w_ = torch.bmm(q, k)
114
+ w_ = w_ * (int(c) ** (-0.5))
115
+ w_ = torch.nn.functional.softmax(w_, dim=2)
116
+
117
+ # attend to values
118
+ # v: (b c t h w) -> (b t c h w) -> (bt c hw)
119
+ # w_: (bt hw hw) -> (bt hw hw)
120
+ v = v.permute(0, 2, 1, 3, 4)
121
+ v = v.reshape(b * t, c, h * w)
122
+ w_ = w_.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q)
123
+ h_ = torch.bmm(v, w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
124
+
125
+ # h_: (b*t c hw) -> (b t c h w) -> (b c t h w)
126
+ h_ = h_.reshape(b, t, c, h, w)
127
+ h_ = h_.permute(0, 2, 1, 3 ,4)
128
+
129
+ h_ = self.proj_out(h_)
130
+
131
+ return x + h_
132
+
133
+ class AttnBlock3DFix(nn.Module):
134
+ """
135
+ Thanks to https://github.com/PKU-YuanGroup/Open-Sora-Plan/pull/172.
136
+ """
137
+ def __init__(self, in_channels):
138
+ super().__init__()
139
+ self.in_channels = in_channels
140
+
141
+ self.norm = Normalize(in_channels)
142
+ self.q = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1)
143
+ self.k = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1)
144
+ self.v = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1)
145
+ self.proj_out = CausalConv3d(in_channels, in_channels, kernel_size=1, stride=1)
146
+
147
+ def forward(self, x):
148
+ h = x
149
+ h = h.permute(0, 2, 3, 4, 1)
150
+ h = self.norm(h)
151
+ h_ = h.permute(0, 4, 1, 2, 3)
152
+ q = self.q(h_)
153
+ k = self.k(h_)
154
+ v = self.v(h_)
155
+
156
+ # compute attention
157
+ # q: (b c t h w) -> (b t c h w) -> (b*t c h*w) -> (b*t h*w c)
158
+ b, c, t, h, w = q.shape
159
+ q = q.permute(0, 2, 1, 3, 4)
160
+ q = q.reshape(b * t, c, h * w)
161
+ q = q.permute(0, 2, 1)
162
+
163
+ # k: (b c t h w) -> (b t c h w) -> (b*t c h*w)
164
+ k = k.permute(0, 2, 1, 3, 4)
165
+ k = k.reshape(b * t, c, h * w)
166
+
167
+ # w: (b*t hw hw)
168
+ w_ = torch.bmm(q, k)
169
+ w_ = w_ * (int(c) ** (-0.5))
170
+ w_ = torch.nn.functional.softmax(w_, dim=2)
171
+
172
+ # attend to values
173
+ # v: (b c t h w) -> (b t c h w) -> (bt c hw)
174
+ # w_: (bt hw hw) -> (bt hw hw)
175
+ v = v.permute(0, 2, 1, 3, 4)
176
+ v = v.reshape(b * t, c, h * w)
177
+ w_ = w_.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q)
178
+ h_ = torch.bmm(v, w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
179
+
180
+ # h_: (b*t c hw) -> (b t c h w) -> (b c t h w)
181
+ h_ = h_.reshape(b, t, c, h, w)
182
+ h_ = h_.permute(0, 2, 1, 3 ,4)
183
+
184
+ h_ = self.proj_out(h_)
185
+
186
+ return x + h_
187
+
188
+
189
+ class AttnBlock(Block):
190
+ def __init__(self, in_channels):
191
+ super().__init__()
192
+ self.in_channels = in_channels
193
+
194
+ self.norm = Normalize(in_channels)
195
+ self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
196
+ self.k = torch.nn.Conv2d(
197
+ in_channels, in_channels, kernel_size=1, stride=1, padding=0
198
+ )
199
+ self.v = torch.nn.Conv2d(
200
+ in_channels, in_channels, kernel_size=1, stride=1, padding=0
201
+ )
202
+ self.proj_out = torch.nn.Conv2d(
203
+ in_channels, in_channels, kernel_size=1, stride=1, padding=0
204
+ )
205
+
206
+ @video_to_image
207
+ def forward(self, x):
208
+ h_ = x
209
+ h_ = self.norm(h_)
210
+ q = self.q(h_)
211
+ k = self.k(h_)
212
+ v = self.v(h_)
213
+
214
+ # compute attention
215
+ b, c, h, w = q.shape
216
+ q = q.reshape(b, c, h * w)
217
+ q = q.permute(0, 2, 1) # b,hw,c
218
+ k = k.reshape(b, c, h * w) # b,c,hw
219
+ w_ = torch.bmm(q, k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
220
+ w_ = w_ * (int(c) ** (-0.5))
221
+ w_ = torch.nn.functional.softmax(w_, dim=2)
222
+
223
+ # attend to values
224
+ v = v.reshape(b, c, h * w)
225
+ w_ = w_.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q)
226
+ h_ = torch.bmm(v, w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
227
+ h_ = h_.reshape(b, c, h, w)
228
+
229
+ h_ = self.proj_out(h_)
230
+
231
+ return x + h_
232
+
233
+
234
+ class TemporalAttnBlock(Block):
235
+ def __init__(self, in_channels):
236
+ super().__init__()
237
+ self.in_channels = in_channels
238
+
239
+ self.norm = Normalize(in_channels)
240
+ self.q = torch.nn.Conv3d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
241
+ self.k = torch.nn.Conv3d(
242
+ in_channels, in_channels, kernel_size=1, stride=1, padding=0
243
+ )
244
+ self.v = torch.nn.Conv3d(
245
+ in_channels, in_channels, kernel_size=1, stride=1, padding=0
246
+ )
247
+ self.proj_out = torch.nn.Conv3d(
248
+ in_channels, in_channels, kernel_size=1, stride=1, padding=0
249
+ )
250
+
251
+ def forward(self, x):
252
+ h_ = x
253
+ h_ = self.norm(h_)
254
+ q = self.q(h_)
255
+ k = self.k(h_)
256
+ v = self.v(h_)
257
+
258
+ # compute attention
259
+ b, c, t, h, w = q.shape
260
+ q = rearrange(q, "b c t h w -> (b h w) t c")
261
+ k = rearrange(k, "b c t h w -> (b h w) c t")
262
+ v = rearrange(v, "b c t h w -> (b h w) c t")
263
+ w_ = torch.bmm(q, k)
264
+ w_ = w_ * (int(c) ** (-0.5))
265
+ w_ = torch.nn.functional.softmax(w_, dim=2)
266
+
267
+ # attend to values
268
+ w_ = w_.permute(0, 2, 1)
269
+ h_ = torch.bmm(v, w_)
270
+ h_ = rearrange(h_, "(b h w) c t -> b c t h w", h=h, w=w)
271
+ h_ = self.proj_out(h_)
272
+
273
+ return x + h_
274
+
275
+
276
+ def make_attn(in_channels, attn_type="vanilla"):
277
+ assert attn_type in ["vanilla", "linear", "none", "vanilla3D"], f"attn_type {attn_type} unknown"
278
+ print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
279
+ print(attn_type)
280
+ if attn_type == "vanilla":
281
+ return AttnBlock(in_channels)
282
+ elif attn_type == "vanilla3D":
283
+ return AttnBlock3D(in_channels)
284
+ elif attn_type == "none":
285
+ return nn.Identity(in_channels)
286
+ else:
287
+ return LinAttnBlock(in_channels)
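
As a quick interface check, a minimal sketch exercising AttnBlock3DFixNorm, which permutes to channels-last before the LayerNorm-based Normalize and then attends spatially within each frame (the shapes below are arbitrary, not taken from this repository):

import torch

block = AttnBlock3DFixNorm(in_channels=64)
video = torch.randn(1, 64, 4, 16, 16)   # (b, c, t, h, w)
out = block(video)                       # per-frame spatial self-attention with residual
assert out.shape == video.shape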
causalvideovae/model/modules/normalize.py ADDED
@@ -0,0 +1,104 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from .block import Block
4
+
5
+ class GroupNorm(Block):
6
+ def __init__(self, num_channels, num_groups=32, eps=1e-6, *args, **kwargs) -> None:
7
+ super().__init__(*args, **kwargs)
8
+ self.norm = torch.nn.GroupNorm(
9
+ num_groups=num_groups, num_channels=num_channels, eps=eps, affine=True
10
+ )
11
+ def forward(self, x):
12
+ return self.norm(x)
13
+
14
+ def Normalize(in_channels, num_groups=32):
15
+ """
16
+ return torch.nn.GroupNorm(
17
+ num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True
18
+ )
19
+ """
20
+ return torch.nn.LayerNorm(in_channels, eps=1e-12, elementwise_affine=True)
21
+
22
+ class ActNorm(nn.Module):
23
+ def __init__(self, num_features, logdet=False, affine=True,
24
+ allow_reverse_init=False):
25
+ assert affine
26
+ super().__init__()
27
+ self.logdet = logdet
28
+ self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
29
+ self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
30
+ self.allow_reverse_init = allow_reverse_init
31
+
32
+ self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
33
+
34
+ def initialize(self, input):
35
+ with torch.no_grad():
36
+ flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
37
+ mean = (
38
+ flatten.mean(1)
39
+ .unsqueeze(1)
40
+ .unsqueeze(2)
41
+ .unsqueeze(3)
42
+ .permute(1, 0, 2, 3)
43
+ )
44
+ std = (
45
+ flatten.std(1)
46
+ .unsqueeze(1)
47
+ .unsqueeze(2)
48
+ .unsqueeze(3)
49
+ .permute(1, 0, 2, 3)
50
+ )
51
+
52
+ self.loc.data.copy_(-mean)
53
+ self.scale.data.copy_(1 / (std + 1e-6))
54
+
55
+ def forward(self, input, reverse=False):
56
+ if reverse:
57
+ return self.reverse(input)
58
+ if len(input.shape) == 2:
59
+ input = input[:,:,None,None]
60
+ squeeze = True
61
+ else:
62
+ squeeze = False
63
+
64
+ _, _, height, width = input.shape
65
+
66
+ if self.training and self.initialized.item() == 0:
67
+ self.initialize(input)
68
+ self.initialized.fill_(1)
69
+
70
+ h = self.scale * (input + self.loc)
71
+
72
+ if squeeze:
73
+ h = h.squeeze(-1).squeeze(-1)
74
+
75
+ if self.logdet:
76
+ log_abs = torch.log(torch.abs(self.scale))
77
+ logdet = height*width*torch.sum(log_abs)
78
+ logdet = logdet * torch.ones(input.shape[0]).to(input)
79
+ return h, logdet
80
+
81
+ return h
82
+
83
+ def reverse(self, output):
84
+ if self.training and self.initialized.item() == 0:
85
+ if not self.allow_reverse_init:
86
+ raise RuntimeError(
87
+ "Initializing ActNorm in reverse direction is "
88
+ "disabled by default. Use allow_reverse_init=True to enable."
89
+ )
90
+ else:
91
+ self.initialize(output)
92
+ self.initialized.fill_(1)
93
+
94
+ if len(output.shape) == 2:
95
+ output = output[:,:,None,None]
96
+ squeeze = True
97
+ else:
98
+ squeeze = False
99
+
100
+ h = output / self.scale - self.loc
101
+
102
+ if squeeze:
103
+ h = h.squeeze(-1).squeeze(-1)
104
+ return h
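
ActNorm performs a data-dependent initialization on the first batch it sees in training mode (per-channel zero mean, unit variance) and supports an exact reverse pass; a small sketch of that behaviour with illustrative values:

import torch

norm = ActNorm(num_features=8)
x = torch.randn(4, 8, 16, 16) * 3.0 + 1.5   # deliberately shifted and scaled
y = norm(x)                                  # first call in train mode triggers initialize()
print(y.mean().item(), y.std().item())       # roughly 0 and 1 after the data-dependent init
x_rec = norm(y, reverse=True)                # reverse() inverts the affine transform
print(torch.allclose(x_rec, x, atol=1e-4))   # True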
causalvideovae/model/modules/ops.py ADDED
@@ -0,0 +1,40 @@
1
+ import torch
2
+ from einops import rearrange
3
+
4
+ def video_to_image(func):
5
+ def wrapper(self, x, *args, **kwargs):
6
+ if x.dim() == 5:
7
+ t = x.shape[2]
8
+ x = rearrange(x, "b c t h w -> (b t) c h w")
9
+ x = func(self, x, *args, **kwargs)
10
+ x = rearrange(x, "(b t) c h w -> b c t h w", t=t)
11
+ return x
12
+ return wrapper
13
+
14
+ def nonlinearity(x):
15
+ return x * torch.sigmoid(x)
16
+
17
+ def cast_tuple(t, length=1):
18
+ return t if isinstance(t, tuple) or isinstance(t, list) else ((t,) * length)
19
+
20
+ def shift_dim(x, src_dim=-1, dest_dim=-1, make_contiguous=True):
21
+ n_dims = len(x.shape)
22
+ if src_dim < 0:
23
+ src_dim = n_dims + src_dim
24
+ if dest_dim < 0:
25
+ dest_dim = n_dims + dest_dim
26
+ assert 0 <= src_dim < n_dims and 0 <= dest_dim < n_dims
27
+ dims = list(range(n_dims))
28
+ del dims[src_dim]
29
+ permutation = []
30
+ ctr = 0
31
+ for i in range(n_dims):
32
+ if i == dest_dim:
33
+ permutation.append(src_dim)
34
+ else:
35
+ permutation.append(dims[ctr])
36
+ ctr += 1
37
+ x = x.permute(permutation)
38
+ if make_contiguous:
39
+ x = x.contiguous()
40
+ return x
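
A short sketch of the helpers above (shapes are illustrative): video_to_image lets a 2D module consume 5D video tensors by folding time into the batch dimension, and shift_dim moves one axis to a new position.

import torch
import torch.nn as nn

class Conv2dOnVideo(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)

    @video_to_image
    def forward(self, x):                    # sees (b*t, c, h, w) when given a video
        return self.conv(x)

video = torch.randn(2, 3, 5, 32, 32)         # (b, c, t, h, w)
print(Conv2dOnVideo()(video).shape)          # torch.Size([2, 8, 5, 32, 32])
print(shift_dim(video, 1, -1).shape)         # torch.Size([2, 5, 32, 32, 3])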
causalvideovae/model/refiner/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from .modeling_refiner import Refiner
2
+
causalvideovae/model/utils/distrib_utils.py ADDED
@@ -0,0 +1,42 @@
1
+ import torch
2
+ import numpy as np
3
+
4
+ class DiagonalGaussianDistribution(object):
5
+ def __init__(self, parameters, deterministic=False):
6
+ self.parameters = parameters
7
+ self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
8
+ self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
9
+ self.deterministic = deterministic
10
+ self.std = torch.exp(0.5 * self.logvar)
11
+ self.var = torch.exp(self.logvar)
12
+ if self.deterministic:
13
+ self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
14
+
15
+ def sample(self):
16
+ x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
17
+ return x
18
+
19
+ def kl(self, other=None):
20
+ if self.deterministic:
21
+ return torch.Tensor([0.])
22
+ else:
23
+ if other is None:
24
+ return 0.5 * torch.sum(torch.pow(self.mean, 2)
25
+ + self.var - 1.0 - self.logvar,
26
+ dim=[1, 2, 3])
27
+ else:
28
+ return 0.5 * torch.sum(
29
+ torch.pow(self.mean - other.mean, 2) / other.var
30
+ + self.var / other.var - 1.0 - self.logvar + other.logvar,
31
+ dim=[1, 2, 3])
32
+
33
+ def nll(self, sample, dims=[1,2,3]):
34
+ if self.deterministic:
35
+ return torch.Tensor([0.])
36
+ logtwopi = np.log(2.0 * np.pi)
37
+ return 0.5 * torch.sum(
38
+ logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
39
+ dim=dims)
40
+
41
+ def mode(self):
42
+ return self.mean
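
A minimal usage sketch: the encoder is expected to produce a tensor that stacks mean and log-variance along the channel dimension, which this wrapper splits, samples from, and scores against a standard normal (shapes below are illustrative).

import torch

params = torch.randn(2, 8, 16, 16)          # 4 mean channels + 4 logvar channels
posterior = DiagonalGaussianDistribution(params)
z = posterior.sample()                       # reparameterized sample, (2, 4, 16, 16)
kl = posterior.kl()                          # KL to N(0, I), one value per batch element
print(z.shape, kl.shape)                     # torch.Size([2, 4, 16, 16]) torch.Size([2])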
causalvideovae/model/utils/module_utils.py ADDED
@@ -0,0 +1,17 @@
1
+ import importlib
2
+
3
+ Module = str
4
+ MODULES_BASE = "causalvideovae.model.modules."
5
+
6
+ def resolve_str_to_obj(str_val, append=True):
7
+ if append:
8
+ str_val = MODULES_BASE + str_val
9
+ module_name, class_name = str_val.rsplit('.', 1)
10
+ module = importlib.import_module(module_name)
11
+ return getattr(module, class_name)
12
+
13
+ def create_instance(module_class_str: str, **kwargs):
14
+ module_name, class_name = module_class_str.rsplit('.', 1)
15
+ module = importlib.import_module(module_name)
16
+ class_ = getattr(module, class_name)
17
+ return class_(**kwargs)
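
A brief sketch of how these resolvers are meant to be driven from config strings; the torch.nn targets below are only illustrative, while with the default append=True the name is looked up under causalvideovae.model.modules (e.g. "attention.AttnBlock").

conv_cls = resolve_str_to_obj("torch.nn.Conv2d", append=False)   # -> torch.nn.Conv2d
conv = conv_cls(3, 8, kernel_size=3, padding=1)

layer = create_instance("torch.nn.Linear", in_features=16, out_features=4)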
causalvideovae/utils/dataset_utils.py ADDED
@@ -0,0 +1,160 @@
1
+ import math
2
+ from einops import rearrange
3
+ import decord
4
+ from torch.nn import functional as F
5
+ import torch
6
+
7
+
8
+ IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG']
9
+
10
+ def is_image_file(filename):
11
+ return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
12
+
13
+ class DecordInit(object):
14
+ """Using Decord(https://github.com/dmlc/decord) to initialize the video_reader."""
15
+
16
+ def __init__(self, num_threads=1):
17
+ self.num_threads = num_threads
18
+ self.ctx = decord.cpu(0)
19
+
20
+ def __call__(self, filename):
21
+ """Perform the Decord initialization.
22
+ Args:
23
+ filename (str): Path of the video file to be
24
+ wrapped in a decord VideoReader.
25
+ """
26
+ reader = decord.VideoReader(filename,
27
+ ctx=self.ctx,
28
+ num_threads=self.num_threads)
29
+ return reader
30
+
31
+ def __repr__(self):
32
+ repr_str = (f'{self.__class__.__name__}('
33
+ f'ctx={self.ctx},'
34
+ f'num_threads={self.num_threads})')
35
+ return repr_str
36
+
37
+ def pad_to_multiple(number, ds_stride):
38
+ remainder = number % ds_stride
39
+ if remainder == 0:
40
+ return number
41
+ else:
42
+ padding = ds_stride - remainder
43
+ return number + padding
44
+
45
+ class Collate:
46
+ def __init__(self, args):
47
+ self.max_image_size = args.max_image_size
48
+ self.ae_stride = args.ae_stride
49
+ self.ae_stride_t = args.ae_stride_t
50
+ self.ae_stride_thw = (self.ae_stride_t, self.ae_stride, self.ae_stride)
51
+ self.ae_stride_1hw = (1, self.ae_stride, self.ae_stride)
52
+
53
+ self.patch_size = args.patch_size
54
+ self.patch_size_t = args.patch_size_t
55
+ self.patch_size_thw = (self.patch_size_t, self.patch_size, self.patch_size)
56
+ self.patch_size_1hw = (1, self.patch_size, self.patch_size)
57
+
58
+ self.num_frames = args.num_frames
59
+ self.use_image_num = args.use_image_num
60
+ self.max_thw = (self.num_frames, self.max_image_size, self.max_image_size)
61
+ self.max_1hw = (1, self.max_image_size, self.max_image_size)
62
+
63
+ def package(self, batch):
64
+ # import ipdb;ipdb.set_trace()
65
+ batch_tubes_vid = [i['video_data']['video'] for i in batch] # b [c t h w]
66
+ input_ids_vid = torch.stack([i['video_data']['input_ids'] for i in batch]) # b 1 l
67
+ cond_mask_vid = torch.stack([i['video_data']['cond_mask'] for i in batch]) # b 1 l
68
+ batch_tubes_img, input_ids_img, cond_mask_img = None, None, None
69
+ if self.use_image_num != 0:
70
+ batch_tubes_img = [j for i in batch for j in i['image_data']['image']] # b*num_img [c 1 h w]
71
+ input_ids_img = torch.stack([i['image_data']['input_ids'] for i in batch]) # b image_num l
72
+ cond_mask_img = torch.stack([i['image_data']['cond_mask'] for i in batch]) # b image_num l
73
+ return batch_tubes_vid, input_ids_vid, cond_mask_vid, batch_tubes_img, input_ids_img, cond_mask_img
74
+
75
+ def __call__(self, batch):
76
+ batch_tubes_vid, input_ids_vid, cond_mask_vid, batch_tubes_img, input_ids_img, cond_mask_img = self.package(batch)
77
+
78
+ # import ipdb;ipdb.set_trace()
79
+ ds_stride = self.ae_stride * self.patch_size
80
+ t_ds_stride = self.ae_stride_t * self.patch_size_t
81
+ if self.use_image_num == 0:
82
+ pad_batch_tubes, attention_mask = self.process(batch_tubes_vid, t_ds_stride, ds_stride,
83
+ self.max_thw, self.ae_stride_thw, self.patch_size_thw, extra_1=True)
84
+ # attention_mask: b t h w
85
+ input_ids, cond_mask = input_ids_vid.squeeze(1), cond_mask_vid.squeeze(1) # b 1 l -> b l
86
+ else:
87
+ pad_batch_tubes_vid, attention_mask_vid = self.process(batch_tubes_vid, t_ds_stride, ds_stride,
88
+ self.max_thw, self.ae_stride_thw, self.patch_size_thw, extra_1=True)
89
+ # attention_mask_vid: b t h w
90
+ pad_batch_tubes_img, attention_mask_img = self.process(batch_tubes_img, 1, ds_stride,
91
+ self.max_1hw, self.ae_stride_1hw, self.patch_size_1hw, extra_1=False)
92
+ pad_batch_tubes_img = rearrange(pad_batch_tubes_img, '(b i) c 1 h w -> b c i h w', i=self.use_image_num)
93
+ attention_mask_img = rearrange(attention_mask_img, '(b i) 1 h w -> b i h w', i=self.use_image_num)
94
+ pad_batch_tubes = torch.cat([pad_batch_tubes_vid, pad_batch_tubes_img], dim=2) # concat at temporal, video first
95
+ # attention_mask_img: b num_img h w
96
+ attention_mask = torch.cat([attention_mask_vid, attention_mask_img], dim=1) # b t+num_img h w
97
+ input_ids = torch.cat([input_ids_vid, input_ids_img], dim=1) # b 1+num_img hw
98
+ cond_mask = torch.cat([cond_mask_vid, cond_mask_img], dim=1) # b 1+num_img hw
99
+ return pad_batch_tubes, attention_mask, input_ids, cond_mask
100
+
101
+ def process(self, batch_tubes, t_ds_stride, ds_stride, max_thw, ae_stride_thw, patch_size_thw, extra_1):
102
+
103
+ # pad to max multiple of ds_stride
104
+ batch_input_size = [i.shape for i in batch_tubes] # [(c t h w), (c t h w)]
105
+ max_t, max_h, max_w = max_thw
106
+ pad_max_t, pad_max_h, pad_max_w = pad_to_multiple(max_t-1 if extra_1 else max_t, t_ds_stride), \
107
+ pad_to_multiple(max_h, ds_stride), \
108
+ pad_to_multiple(max_w, ds_stride)
109
+ pad_max_t = pad_max_t + 1 if extra_1 else pad_max_t
110
+ each_pad_t_h_w = [[pad_max_t - i.shape[1],
111
+ pad_max_h - i.shape[2],
112
+ pad_max_w - i.shape[3]] for i in batch_tubes]
113
+ pad_batch_tubes = [F.pad(im,
114
+ (0, pad_w,
115
+ 0, pad_h,
116
+ 0, pad_t), value=0) for (pad_t, pad_h, pad_w), im in zip(each_pad_t_h_w, batch_tubes)]
117
+ pad_batch_tubes = torch.stack(pad_batch_tubes, dim=0)
118
+
119
+ # make attention_mask
120
+ # first_channel_first_frame, first_channel_other_frame = pad_batch_tubes[:, :1, :1], pad_batch_tubes[:, :1, 1:] # first channel to make attention_mask
121
+ # attention_mask_first_frame = F.max_pool3d(first_channel_first_frame, kernel_size=(1, *ae_stride_thw[1:]), stride=(1, *ae_stride_thw[1:]))
122
+ # if first_channel_other_frame.numel() != 0:
123
+ # attention_mask_other_frame = F.max_pool3d(first_channel_other_frame, kernel_size=ae_stride_thw, stride=ae_stride_thw)
124
+ # attention_mask = torch.cat([attention_mask_first_frame, attention_mask_other_frame], dim=2)
125
+ # else:
126
+ # attention_mask = attention_mask_first_frame
127
+ # attention_mask_ = attention_mask[:, 0].bool().float() # b t h w, do not channel
128
+
129
+ # import ipdb;ipdb.set_trace()
130
+ max_tube_size = [pad_max_t, pad_max_h, pad_max_w]
131
+ max_latent_size = [((max_tube_size[0]-1) // ae_stride_thw[0] + 1) if extra_1 else (max_tube_size[0] // ae_stride_thw[0]),
132
+ max_tube_size[1] // ae_stride_thw[1],
133
+ max_tube_size[2] // ae_stride_thw[2]]
134
+ valid_latent_size = [[int(math.ceil((i[1]-1) / ae_stride_thw[0])) + 1 if extra_1 else int(math.ceil(i[1] / ae_stride_thw[0])),
135
+ int(math.ceil(i[2] / ae_stride_thw[1])),
136
+ int(math.ceil(i[3] / ae_stride_thw[2]))] for i in batch_input_size]
137
+ attention_mask = [F.pad(torch.ones(i),
138
+ (0, max_latent_size[2] - i[2],
139
+ 0, max_latent_size[1] - i[1],
140
+ 0, max_latent_size[0] - i[0]), value=0) for i in valid_latent_size]
141
+ attention_mask = torch.stack(attention_mask) # b t h w
142
+
143
+
144
+ # max_tube_size = [pad_max_t, pad_max_h, pad_max_w]
145
+ # max_latent_size = [((max_tube_size[0]-1) // ae_stride_thw[0] + 1) if extra_1 else (max_tube_size[0] // ae_stride_thw[0]),
146
+ # max_tube_size[1] // ae_stride_thw[1],
147
+ # max_tube_size[2] // ae_stride_thw[2]]
148
+ # max_patchify_latent_size = [((max_latent_size[0]-1) // patch_size_thw[0] + 1) if extra_1 else (max_latent_size[0] // patch_size_thw[0]),
149
+ # max_latent_size[1] // patch_size_thw[1],
150
+ # max_latent_size[2] // patch_size_thw[2]]
151
+ # valid_patchify_latent_size = [[int(math.ceil((i[1]-1) / t_ds_stride)) + 1 if extra_1 else int(math.ceil(i[1] / t_ds_stride)),
152
+ # int(math.ceil(i[2] / ds_stride)),
153
+ # int(math.ceil(i[3] / ds_stride))] for i in batch_input_size]
154
+ # attention_mask = [F.pad(torch.ones(i),
155
+ # (0, max_patchify_latent_size[2] - i[2],
156
+ # 0, max_patchify_latent_size[1] - i[1],
157
+ # 0, max_patchify_latent_size[0] - i[0]), value=0) for i in valid_patchify_latent_size]
158
+ # attention_mask = torch.stack(attention_mask) # b t h w
159
+
160
+ return pad_batch_tubes, attention_mask
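
To make the padding arithmetic in Collate.process concrete, a small sketch of pad_to_multiple and the extra_1 convention used for causal video latents, where the first frame is kept aside and only the remaining t-1 frames are aligned to the temporal stride (numbers are illustrative):

print(pad_to_multiple(30, 8))        # 32: round the spatial size up to the next multiple of the stride
print(pad_to_multiple(32, 8))        # 32: already aligned, unchanged

num_frames, t_stride = 17, 4         # extra_1=True: align (t - 1), then add the first frame back
pad_t = pad_to_multiple(num_frames - 1, t_stride) + 1
print(pad_t)                         # 17, i.e. 1 + 16 with 16 a multiple of 4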
causalvideovae/utils/utils.py ADDED
@@ -0,0 +1,627 @@
1
+ import os
2
+
3
+ import torch
4
+
5
+ import os
6
+ import math
7
+ import torch
8
+ import logging
9
+ import random
10
+ import subprocess
11
+ import numpy as np
12
+ import torch.distributed as dist
13
+
14
+ # from torch._six import inf
15
+ from torch import inf
16
+ from PIL import Image
17
+ from typing import Union, Iterable
18
+ from collections import OrderedDict
19
+ from torch.utils.tensorboard import SummaryWriter
20
+
21
+ from diffusers.utils import is_bs4_available, is_ftfy_available
22
+
23
+ import html
24
+ import re
25
+ import urllib.parse as ul
26
+
27
+ from torch.utils.data import Dataset
28
+ from torchvision.transforms import Lambda, Compose
29
+ from decord import VideoReader, cpu
30
+
31
+ from torchvision.transforms._transforms_video import CenterCropVideo
32
+ from torch.nn import functional as F
33
+ import cv2
34
+ import numpy.typing as npt
35
+
36
+ if is_bs4_available():
37
+ from bs4 import BeautifulSoup
38
+
39
+ if is_ftfy_available():
40
+ import ftfy
41
+
42
+ _tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]]
43
+
44
+ def find_model(model_name):
45
+ """
46
+ Finds a pre-trained Latte model, downloading it if necessary. Alternatively, loads a model from a local path.
47
+ """
48
+ assert os.path.isfile(model_name), f'Could not find Latte checkpoint at {model_name}'
49
+ checkpoint = torch.load(model_name, map_location=lambda storage, loc: storage)
50
+
51
+ # if "ema" in checkpoint: # supports checkpoints from train.py
52
+ # print('Using Ema!')
53
+ # checkpoint = checkpoint["ema"]
54
+ # else:
55
+ print('Using model!')
56
+ checkpoint = checkpoint['model']
57
+ return checkpoint
58
+
59
+ #################################################################################
60
+ # Training Clip Gradients #
61
+ #################################################################################
62
+ # import deepspeed  # uncomment if print_grad_norm below is used (it calls deepspeed.utils.safe_get_full_grad)
63
+ def print_grad_norm(model):
64
+ # compute and print the gradient norm
65
+ # model_engine = accelerator.deepspeed_engine_wrapped.engine
66
+ # gradients = model_engine.get_gradients()
67
+ # grad_norm = get_grad_norm(gradients)
68
+ # compute and print the gradient norm (requires deepspeed)
69
+ grad_norm = 0
70
+ n_grad = 0
71
+ for name, param in model.named_parameters():
72
+ grad_data = deepspeed.utils.safe_get_full_grad(param)
73
+ # self.print_tensor_stats(grad_data, name=name)
74
+
75
+ if grad_data is not None:
76
+ param_norm = grad_data.norm(2)
77
+ grad_norm += param_norm.item() ** 2
78
+ n_grad += 1
79
+ grad_norm = (grad_norm / n_grad) ** (1. / 2)
80
+
81
+ # self.print_msg('=' * 50)
82
+ print(f'Gradient Norm is : {grad_norm}')
83
+
84
+ def get_grad_norm(
85
+ parameters: _tensor_or_tensors, norm_type: float = 2.0) -> torch.Tensor:
86
+ r"""
87
+ Copy from torch.nn.utils.clip_grad_norm_
88
+
89
+ Computes the total gradient norm of an iterable of parameters.
93
+
94
+ The norm is computed over all gradients together, as if they were
95
+ concatenated into a single vector. Gradients are not modified.
93
+
94
+ Args:
95
+ parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
96
+ single Tensor that will have gradients normalized
97
+ max_norm (float or int): max norm of the gradients
98
+ norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
99
+ infinity norm.
100
+ error_if_nonfinite (bool): if True, an error is thrown if the total
101
+ norm of the gradients from :attr:`parameters` is ``nan``,
102
+ ``inf``, or ``-inf``. Default: False (will switch to True in the future)
103
+
104
+ Returns:
105
+ Total norm of the parameter gradients (viewed as a single vector).
106
+ """
107
+ if isinstance(parameters, torch.Tensor):
108
+ parameters = [parameters]
109
+ grads = [p.grad for p in parameters if p.grad is not None]
110
+ norm_type = float(norm_type)
111
+ if len(grads) == 0:
112
+ return torch.tensor(0.)
113
+ device = grads[0].device
114
+ if norm_type == inf:
115
+ norms = [g.detach().abs().max().to(device) for g in grads]
116
+ total_norm = norms[0] if len(norms) == 1 else torch.max(torch.stack(norms))
117
+ else:
118
+ total_norm = torch.norm(torch.stack([torch.norm(g.detach(), norm_type).to(device) for g in grads]), norm_type)
119
+ return total_norm
120
+
121
+
122
+ def clip_grad_norm_(
123
+ parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0,
124
+ error_if_nonfinite: bool = False, clip_grad=True) -> torch.Tensor:
125
+ r"""
126
+ Copy from torch.nn.utils.clip_grad_norm_
127
+
128
+ Clips gradient norm of an iterable of parameters.
129
+
130
+ The norm is computed over all gradients together, as if they were
131
+ concatenated into a single vector. Gradients are modified in-place.
132
+
133
+ Args:
134
+ parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
135
+ single Tensor that will have gradients normalized
136
+ max_norm (float or int): max norm of the gradients
137
+ norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
138
+ infinity norm.
139
+ error_if_nonfinite (bool): if True, an error is thrown if the total
140
+ norm of the gradients from :attr:`parameters` is ``nan``,
141
+ ``inf``, or ``-inf``. Default: False (will switch to True in the future)
142
+
143
+ Returns:
144
+ Total norm of the parameter gradients (viewed as a single vector).
145
+ """
146
+ if isinstance(parameters, torch.Tensor):
147
+ parameters = [parameters]
148
+ grads = [p.grad for p in parameters if p.grad is not None]
149
+ max_norm = float(max_norm)
150
+ norm_type = float(norm_type)
151
+ if len(grads) == 0:
152
+ return torch.tensor(0.)
153
+ device = grads[0].device
154
+ if norm_type == inf:
155
+ norms = [g.detach().abs().max().to(device) for g in grads]
156
+ total_norm = norms[0] if len(norms) == 1 else torch.max(torch.stack(norms))
157
+ else:
158
+ total_norm = torch.norm(torch.stack([torch.norm(g.detach(), norm_type).to(device) for g in grads]), norm_type)
159
+
160
+ if clip_grad:
161
+ if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()):
162
+ raise RuntimeError(
163
+ f'The total norm of order {norm_type} for gradients from '
164
+ '`parameters` is non-finite, so it cannot be clipped. To disable '
165
+ 'this error and scale the gradients by the non-finite norm anyway, '
166
+ 'set `error_if_nonfinite=False`')
167
+ clip_coef = max_norm / (total_norm + 1e-6)
168
+ # Note: multiplying by the clamped coef is redundant when the coef is clamped to 1, but doing so
169
+ # avoids a `if clip_coef < 1:` conditional which can require a CPU <=> device synchronization
170
+ # when the gradients do not reside in CPU memory.
171
+ clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
172
+ for g in grads:
173
+ g.detach().mul_(clip_coef_clamped.to(g.device))
174
+ # gradient_cliped = torch.norm(torch.stack([torch.norm(g.detach(), norm_type).to(device) for g in grads]), norm_type)
175
+ # print(gradient_cliped)
176
+ return total_norm
177
+
178
+
179
+ def get_experiment_dir(root_dir, args):
180
+ # if args.pretrained is not None and 'Latte-XL-2-256x256.pt' not in args.pretrained:
181
+ # root_dir += '-WOPRE'
182
+ if args.use_compile:
183
+ root_dir += '-Compile' # speedup by torch compile
184
+ if args.attention_mode:
185
+ root_dir += f'-{args.attention_mode.upper()}'
186
+ # if args.enable_xformers_memory_efficient_attention:
187
+ # root_dir += '-Xfor'
188
+ if args.gradient_checkpointing:
189
+ root_dir += '-Gc'
190
+ if args.mixed_precision:
191
+ root_dir += f'-{args.mixed_precision.upper()}'
192
+ root_dir += f'-{args.max_image_size}'
193
+ return root_dir
194
+
195
+ def get_precision(args):
196
+ if args.mixed_precision == "bf16":
197
+ dtype = torch.bfloat16
198
+ elif args.mixed_precision == "fp16":
199
+ dtype = torch.float16
200
+ else:
201
+ dtype = torch.float32
202
+ return dtype
203
+
204
+ #################################################################################
205
+ # Training Logger #
206
+ #################################################################################
207
+
208
+ def create_logger(logging_dir):
209
+ """
210
+ Create a logger that writes to a log file and stdout.
211
+ """
212
+ if dist.get_rank() == 0: # real logger
213
+ logging.basicConfig(
214
+ level=logging.INFO,
215
+ # format='[\033[34m%(asctime)s\033[0m] %(message)s',
216
+ format='[%(asctime)s] %(message)s',
217
+ datefmt='%Y-%m-%d %H:%M:%S',
218
+ handlers=[logging.StreamHandler(), logging.FileHandler(f"{logging_dir}/log.txt")]
219
+ )
220
+ logger = logging.getLogger(__name__)
221
+
222
+ else: # dummy logger (does nothing)
223
+ logger = logging.getLogger(__name__)
224
+ logger.addHandler(logging.NullHandler())
225
+ return logger
226
+
227
+
228
+ def create_tensorboard(tensorboard_dir):
229
+ """
230
+ Create a tensorboard that saves losses.
231
+ """
232
+ if dist.get_rank() == 0: # real tensorboard
233
+ # tensorboard
234
+ writer = SummaryWriter(tensorboard_dir)
235
+
236
+ return writer
237
+
238
+
239
+ def write_tensorboard(writer, *args):
240
+ '''
241
+ write the loss information to a tensorboard file.
242
+ Only for pytorch DDP mode.
243
+ '''
244
+ if dist.get_rank() == 0: # real tensorboard
245
+ writer.add_scalar(args[0], args[1], args[2])
246
+
247
+
248
+ #################################################################################
249
+ # EMA Update/ DDP Training Utils #
250
+ #################################################################################
251
+
252
+ @torch.no_grad()
253
+ def update_ema(ema_model, model, decay=0.9999):
254
+ """
255
+ Step the EMA model towards the current model.
256
+ """
257
+ ema_params = OrderedDict(ema_model.named_parameters())
258
+ model_params = OrderedDict(model.named_parameters())
259
+
260
+ for name, param in model_params.items():
261
+ # TODO: Consider applying only to params that require_grad to avoid small numerical changes of pos_embed
262
+ ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
263
+
264
+
265
+ def requires_grad(model, flag=True):
266
+ """
267
+ Set requires_grad flag for all parameters in a model.
268
+ """
269
+ for p in model.parameters():
270
+ p.requires_grad = flag
271
+
272
+
273
+ def cleanup():
274
+ """
275
+ End DDP training.
276
+ """
277
+ dist.destroy_process_group()
278
+
279
+
280
+ def setup_distributed(backend="nccl", port=None):
281
+ """Initialize distributed training environment.
282
+ support both slurm and torch.distributed.launch
283
+ see torch.distributed.init_process_group() for more details
284
+ """
285
+ num_gpus = torch.cuda.device_count()
286
+
287
+ if "SLURM_JOB_ID" in os.environ:
288
+ rank = int(os.environ["SLURM_PROCID"])
289
+ world_size = int(os.environ["SLURM_NTASKS"])
290
+ node_list = os.environ["SLURM_NODELIST"]
291
+ addr = subprocess.getoutput(f"scontrol show hostname {node_list} | head -n1")
292
+ # specify master port
293
+ if port is not None:
294
+ os.environ["MASTER_PORT"] = str(port)
295
+ elif "MASTER_PORT" not in os.environ:
296
+ # os.environ["MASTER_PORT"] = "29566"
297
+ os.environ["MASTER_PORT"] = str(29567 + num_gpus)
298
+ if "MASTER_ADDR" not in os.environ:
299
+ os.environ["MASTER_ADDR"] = addr
300
+ os.environ["WORLD_SIZE"] = str(world_size)
301
+ os.environ["LOCAL_RANK"] = str(rank % num_gpus)
302
+ os.environ["RANK"] = str(rank)
303
+ else:
304
+ rank = int(os.environ["RANK"])
305
+ world_size = int(os.environ["WORLD_SIZE"])
306
+
307
+ # torch.cuda.set_device(rank % num_gpus)
308
+
309
+ dist.init_process_group(
310
+ backend=backend,
311
+ world_size=world_size,
312
+ rank=rank,
313
+ )
314
+
315
+
316
+ #################################################################################
317
+ # Testing Utils #
318
+ #################################################################################
319
+
320
+ def save_video_grid(video, nrow=None):
321
+ b, t, h, w, c = video.shape
322
+
323
+ if nrow is None:
324
+ nrow = math.ceil(math.sqrt(b))
325
+ ncol = math.ceil(b / nrow)
326
+ padding = 1
327
+ video_grid = torch.zeros((t, (padding + h) * nrow + padding,
328
+ (padding + w) * ncol + padding, c), dtype=torch.uint8)
329
+
330
+ print(video_grid.shape)
331
+ for i in range(b):
332
+ r = i // ncol
333
+ c = i % ncol
334
+ start_r = (padding + h) * r
335
+ start_c = (padding + w) * c
336
+ video_grid[:, start_r:start_r + h, start_c:start_c + w] = video[i]
337
+
338
+ return video_grid
339
+
340
+
341
+ #################################################################################
342
+ # MMCV Utils #
343
+ #################################################################################
344
+
345
+
346
+ def collect_env():
347
+ # Copyright (c) OpenMMLab. All rights reserved.
348
+ from mmcv.utils import collect_env as collect_base_env
349
+ from mmcv.utils import get_git_hash
350
+ """Collect the information of the running environments."""
351
+
352
+ env_info = collect_base_env()
353
+ env_info['MMClassification'] = get_git_hash()[:7]
354
+
355
+ for name, val in env_info.items():
356
+ print(f'{name}: {val}')
357
+
358
+ print(torch.cuda.get_arch_list())
359
+ print(torch.version.cuda)
360
+
361
+
362
+ #################################################################################
363
+ # Pixart-alpha Utils #
364
+ #################################################################################
365
+
366
+
367
+ bad_punct_regex = re.compile(r'['+'#®•©™&@·º½¾¿¡§~'+'\)'+'\('+'\]'+'\['+'\}'+'\{'+'\|'+'\\'+'\/'+'\*' + r']{1,}') # noqa
368
+
369
+ def text_preprocessing(text):
370
+ # The exact text cleaning as was in the training stage:
371
+ text = clean_caption(text)
372
+ text = clean_caption(text)
373
+ return text
374
+
375
+ def basic_clean(text):
376
+ text = ftfy.fix_text(text)
377
+ text = html.unescape(html.unescape(text))
378
+ return text.strip()
379
+
380
+ def clean_caption(caption):
381
+ caption = str(caption)
382
+ caption = ul.unquote_plus(caption)
383
+ caption = caption.strip().lower()
384
+ caption = re.sub('<person>', 'person', caption)
385
+ # urls:
386
+ caption = re.sub(
387
+ r'\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))', # noqa
388
+ '', caption) # regex for urls
389
+ caption = re.sub(
390
+ r'\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))', # noqa
391
+ '', caption) # regex for urls
392
+ # html:
393
+ caption = BeautifulSoup(caption, features='html.parser').text
394
+
395
+ # @<nickname>
396
+ caption = re.sub(r'@[\w\d]+\b', '', caption)
397
+
398
+ # 31C0—31EF CJK Strokes
399
+ # 31F0—31FF Katakana Phonetic Extensions
400
+ # 3200—32FF Enclosed CJK Letters and Months
401
+ # 3300—33FF CJK Compatibility
402
+ # 3400—4DBF CJK Unified Ideographs Extension A
403
+ # 4DC0—4DFF Yijing Hexagram Symbols
404
+ # 4E00—9FFF CJK Unified Ideographs
405
+ caption = re.sub(r'[\u31c0-\u31ef]+', '', caption)
406
+ caption = re.sub(r'[\u31f0-\u31ff]+', '', caption)
407
+ caption = re.sub(r'[\u3200-\u32ff]+', '', caption)
408
+ caption = re.sub(r'[\u3300-\u33ff]+', '', caption)
409
+ caption = re.sub(r'[\u3400-\u4dbf]+', '', caption)
410
+ caption = re.sub(r'[\u4dc0-\u4dff]+', '', caption)
411
+ caption = re.sub(r'[\u4e00-\u9fff]+', '', caption)
412
+ #######################################################
413
+
414
+ # все виды тире / all types of dash --> "-"
415
+ caption = re.sub(
416
+ r'[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+', # noqa
417
+ '-', caption)
418
+
419
+ # кавычки к одному стандарту
420
+ caption = re.sub(r'[`´«»“”¨]', '"', caption)
421
+ caption = re.sub(r'[‘’]', "'", caption)
422
+
423
+ # &quot;
424
+ caption = re.sub(r'&quot;?', '', caption)
425
+ # &amp
426
+ caption = re.sub(r'&amp', '', caption)
427
+
428
+ # ip adresses:
429
+ caption = re.sub(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', ' ', caption)
430
+
431
+ # article ids:
432
+ caption = re.sub(r'\d:\d\d\s+$', '', caption)
433
+
434
+ # \n
435
+ caption = re.sub(r'\\n', ' ', caption)
436
+
437
+ # "#123"
438
+ caption = re.sub(r'#\d{1,3}\b', '', caption)
439
+ # "#12345.."
440
+ caption = re.sub(r'#\d{5,}\b', '', caption)
441
+ # "123456.."
442
+ caption = re.sub(r'\b\d{6,}\b', '', caption)
443
+ # filenames:
444
+ caption = re.sub(r'[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)', '', caption)
445
+
446
+ #
447
+ caption = re.sub(r'[\"\']{2,}', r'"', caption) # """AUSVERKAUFT"""
448
+ caption = re.sub(r'[\.]{2,}', r' ', caption) # """AUSVERKAUFT"""
449
+
450
+ caption = re.sub(bad_punct_regex, r' ', caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
451
+ caption = re.sub(r'\s+\.\s+', r' ', caption) # " . "
452
+
453
+ # this-is-my-cute-cat / this_is_my_cute_cat
454
+ regex2 = re.compile(r'(?:\-|\_)')
455
+ if len(re.findall(regex2, caption)) > 3:
456
+ caption = re.sub(regex2, ' ', caption)
457
+
458
+ caption = basic_clean(caption)
459
+
460
+ caption = re.sub(r'\b[a-zA-Z]{1,3}\d{3,15}\b', '', caption) # jc6640
461
+ caption = re.sub(r'\b[a-zA-Z]+\d+[a-zA-Z]+\b', '', caption) # jc6640vc
462
+ caption = re.sub(r'\b\d+[a-zA-Z]+\d+\b', '', caption) # 6640vc231
463
+
464
+ caption = re.sub(r'(worldwide\s+)?(free\s+)?shipping', '', caption)
465
+ caption = re.sub(r'(free\s)?download(\sfree)?', '', caption)
466
+ caption = re.sub(r'\bclick\b\s(?:for|on)\s\w+', '', caption)
467
+ caption = re.sub(r'\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?', '', caption)
468
+ caption = re.sub(r'\bpage\s+\d+\b', '', caption)
469
+
470
+ caption = re.sub(r'\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b', r' ', caption) # j2d1a2a...
471
+
472
+ caption = re.sub(r'\b\d+\.?\d*[xх×]\d+\.?\d*\b', '', caption)
473
+
474
+ caption = re.sub(r'\b\s+\:\s+', r': ', caption)
475
+ caption = re.sub(r'(\D[,\./])\b', r'\1 ', caption)
476
+ caption = re.sub(r'\s+', ' ', caption)
477
+
478
+ caption = caption.strip()
479
+
480
+ caption = re.sub(r'^[\"\']([\w\W]+)[\"\']$', r'\1', caption)
481
+ caption = re.sub(r'^[\'\_,\-\:;]', r'', caption)
482
+ caption = re.sub(r'[\'\_,\-\:\-\+]$', r'', caption)
483
+ caption = re.sub(r'^\.\S+$', '', caption)
484
+
485
+ return caption.strip()
486
+
487
+
488
+ #################################################################################
489
+ # eval PSNR when training #
490
+ #################################################################################
491
+
492
+ def resize(x, resolution):
493
+ height, width = x.shape[-2:]
494
+ aspect_ratio = width / height
495
+ if width <= height:
496
+ new_width = resolution
497
+ new_height = int(resolution / aspect_ratio)
498
+ else:
499
+ new_height = resolution
500
+ new_width = int(resolution * aspect_ratio)
501
+ resized_x = F.interpolate(x, size=(new_height, new_width), mode='bilinear', align_corners=True, antialias=True)
502
+ return resized_x
503
+
504
+ def _preprocess(video_data, short_size=128, crop_size=None):
505
+ transform = Compose(
506
+ [
507
+ Lambda(lambda x: (x / 255.0)*2-1),
508
+ Lambda(lambda x: resize(x, short_size)),
509
+ (
510
+ CenterCropVideo(crop_size=crop_size)
511
+ if crop_size is not None
512
+ else Lambda(lambda x: x)
513
+ ),
514
+ ]
515
+ )
516
+ video_outputs = transform(video_data)
517
+ video_outputs = _format_video_shape(video_outputs)
518
+ return video_outputs
519
+
520
+ def _format_video_shape(video, time_compress=4, spatial_compress=8):
521
+ time = video.shape[1]
522
+ height = video.shape[2]
523
+ width = video.shape[3]
524
+ new_time = (
525
+ (time - (time - 1) % time_compress)
526
+ if (time - 1) % time_compress != 0
527
+ else time
528
+ )
529
+ new_height = (
530
+ (height - (height) % spatial_compress)
531
+ if height % spatial_compress != 0
532
+ else height
533
+ )
534
+ new_width = (
535
+ (width - (width) % spatial_compress) if width % spatial_compress != 0 else width
536
+ )
537
+ return video[:, :new_time, :new_height, :new_width]
538
+
539
+ def array_to_video(
540
+ image_array: npt.NDArray, fps: float = 30.0, output_file: str = "output_video.mp4"
541
+ ) -> None:
542
+ height, width, channels = image_array[0].shape
543
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
544
+ video_writer = cv2.VideoWriter(output_file, fourcc, float(fps), (width, height))
545
+
546
+ for image in image_array:
547
+ image_rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
548
+ video_writer.write(image_rgb)
549
+
550
+ video_writer.release()
551
+
552
+
553
+ def custom_to_video(
554
+ x: torch.Tensor, fps: float = 2.0, output_file: str = "output_video.mp4"
555
+ ) -> None:
556
+ x = x.detach().cpu()
557
+ x = torch.clamp(x, -1, 1)
558
+ x = (x + 1) / 2
559
+ x = x.permute(1, 2, 3, 0).float().numpy()
560
+ x = (255 * x).astype(np.uint8)
561
+ array_to_video(x, fps=fps, output_file=output_file)
562
+ return
563
+
564
+ class RealVideoDataset(Dataset):
565
+ def __init__(
566
+ self,
567
+ real_video_dir,
568
+ num_frames,
569
+ sample_rate=1,
570
+ crop_size=None,
571
+ resolution=128,
572
+ ) -> None:
573
+ super().__init__()
574
+ self.real_video_files = self._combine_without_prefix(real_video_dir)
575
+ self.num_frames = num_frames
576
+ self.sample_rate = sample_rate
577
+ self.crop_size = crop_size
578
+ self.short_size = resolution
579
+
580
+ def __len__(self):
581
+ return len(self.real_video_files)
582
+
583
+ def __getitem__(self, index):
584
+ if index >= len(self):
585
+ raise IndexError
586
+ real_video_file = self.real_video_files[index]
587
+ real_video_tensor = self._load_video(real_video_file)
588
+ video_name = os.path.basename(real_video_file)
589
+ return {'video': real_video_tensor, 'file_name': video_name }
590
+
591
+ def _load_video(self, video_path):
592
+ num_frames = self.num_frames
593
+ sample_rate = self.sample_rate
594
+ decord_vr = VideoReader(video_path, ctx=cpu(0))
595
+ total_frames = len(decord_vr)
596
+ sample_frames_len = sample_rate * num_frames
597
+
598
+ if total_frames > sample_frames_len:
599
+ s = 0
600
+ e = s + sample_frames_len
601
+ num_frames = num_frames
602
+ else:
603
+ s = 0
604
+ e = total_frames
605
+ num_frames = int(total_frames / sample_frames_len * num_frames)
606
+ print(
607
+ f"sample_frames_len {sample_frames_len}, only can sample {num_frames * sample_rate}",
608
+ video_path,
609
+ total_frames,
610
+ )
611
+
612
+ frame_id_list = np.linspace(s, e - 1, num_frames, dtype=int)
613
+ video_data = decord_vr.get_batch(frame_id_list).asnumpy()
614
+ video_data = torch.from_numpy(video_data)
615
+ video_data = video_data.permute(3, 0, 1, 2)
616
+ return _preprocess(
617
+ video_data, short_size=self.short_size, crop_size=self.crop_size
618
+ )
619
+
620
+ def _combine_without_prefix(self, folder_path, prefix="."):
621
+ folder = []
622
+ for name in os.listdir(folder_path):
623
+ if name[0] == prefix:
624
+ continue
625
+ folder.append(os.path.join(folder_path, name))
626
+ folder.sort()
627
+ return folder
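As a quick reference for the shape logic above: _format_video_shape trims a clip so that (T - 1) is a multiple of the temporal compression factor and H/W are multiples of the spatial factor. A minimal standalone sketch of the same arithmetic (plain Python, no tensors; the function name is ours, not part of the repo):

# Sketch of the trimming rule used by _format_video_shape above.
def aligned_shape(t, h, w, time_compress=4, spatial_compress=8):
    new_t = t if (t - 1) % time_compress == 0 else t - (t - 1) % time_compress
    new_h = h - h % spatial_compress
    new_w = w - w % spatial_compress
    return new_t, new_h, new_w

print(aligned_shape(30, 260, 260))  # -> (29, 256, 256)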
scripts/CogVideovae_gen_video.sh ADDED
@@ -0,0 +1,39 @@
1
+ # NCCL setting
2
+ export GLOO_SOCKET_IFNAME=bond0
3
+ export NCCL_SOCKET_IFNAME=bond0
4
+ export NCCL_IB_HCA=mlx5_10:1,mlx5_11:1,mlx5_12:1,mlx5_13:1
5
+ export NCCL_IB_GID_INDEX=3
6
+ export NCCL_IB_TC=162
7
+ export NCCL_IB_TIMEOUT=22
8
+ export NCCL_PXN_DISABLE=0
9
+ export NCCL_IB_QPS_PER_CONNECTION=4
10
+ # export NCCL_ALGO=Ring
11
+ export OMP_NUM_THREADS=1
12
+ export MKL_NUM_THREADS=1
13
+ export NCCL_ALGO=Tree
14
+ REAL_DATASET_DIR=/storage/dataset/train
15
+ EXP_NAME=test_train
16
+ SAMPLE_RATE=1
17
+ NUM_FRAMES=65
18
+ RESOLUTION=720
19
+ SUBSET_SIZE=100
20
+ CKPT=/storage/clh/models/CogVideo
21
+ torchrun \
22
+ --nnodes=1 --nproc_per_node=8 \
23
+ --rdzv_endpoint=localhost:29504 \
24
+ --master_addr=localhost \
25
+ --master_port=29600 \
26
+ scripts/rec_CogVideo_vae.py \
27
+ --batch_size 1 \
28
+ --real_video_dir ${REAL_DATASET_DIR} \
29
+ --generated_video_dir /storage/clh/gen/CogVideo_vae/train\
30
+ --device cuda \
31
+ --sample_fps 24 \
32
+ --sample_rate ${SAMPLE_RATE} \
33
+ --num_frames ${NUM_FRAMES} \
34
+ --resolution ${RESOLUTION} \
35
+ --crop_size ${RESOLUTION} \
36
+ --num_workers 8 \
37
+ --ckpt ${CKPT} \
38
+ --output_origin \
39
+ #--enable_tiling \
scripts/causalvideovae_gen_video.sh ADDED
@@ -0,0 +1,42 @@
1
+ # NCCL setting
2
+ export GLOO_SOCKET_IFNAME=bond0
3
+ export NCCL_SOCKET_IFNAME=bond0
4
+ export NCCL_IB_HCA=mlx5_10:1,mlx5_11:1,mlx5_12:1,mlx5_13:1
5
+ export NCCL_IB_GID_INDEX=3
6
+ export NCCL_IB_TC=162
7
+ export NCCL_IB_TIMEOUT=22
8
+ export NCCL_PXN_DISABLE=0
9
+ export NCCL_IB_QPS_PER_CONNECTION=4
10
+ # export NCCL_ALGO=Ring
11
+ export OMP_NUM_THREADS=1
12
+ export MKL_NUM_THREADS=1
13
+ export NCCL_ALGO=Tree
14
+ REAL_DATASET_DIR=/storage/dataset/vae_eval/panda70m
15
+ EXP_NAME=test_train
16
+ SAMPLE_RATE=1
17
+ NUM_FRAMES=33
18
+ RESOLUTION=256
19
+ SUBSET_SIZE=1000
20
+ CKPT=/storage/clh/models/488dim8
21
+
22
+ torchrun \
23
+ --nnodes=1 --nproc_per_node=8 \
24
+ --rdzv_endpoint=localhost:29504 \
25
+ --master_addr=localhost \
26
+ --master_port=29600 \
27
+ scripts/rec_causalvideo_vae.py \
28
+ --batch_size 1 \
29
+ --real_video_dir ${REAL_DATASET_DIR} \
30
+ --generated_video_dir /storage/clh/gen/488dim8 \
31
+ --device cuda \
32
+ --sample_fps 24 \
33
+ --sample_rate ${SAMPLE_RATE} \
34
+ --num_frames ${NUM_FRAMES} \
35
+ --resolution ${RESOLUTION} \
36
+ --crop_size ${RESOLUTION} \
37
+ --num_workers 8 \
38
+ --ckpt ${CKPT} \
39
+ --output_origin \
40
+ --subset_size 1000\
41
+ #--change_decoder \
42
+ #--decoder_dir /remote-home1/clh/Causal-Video-VAE/results/decoder_only-lr1.00e-05-bs1-rs248-sr2-fr25/checkpoint-5000.ckpt \
scripts/easyanimate_gen_video.sh ADDED
@@ -0,0 +1,40 @@
1
+ # NCCL setting
2
+ export GLOO_SOCKET_IFNAME=bond0
3
+ export NCCL_SOCKET_IFNAME=bond0
4
+ export NCCL_IB_HCA=mlx5_10:1,mlx5_11:1,mlx5_12:1,mlx5_13:1
5
+ export NCCL_IB_GID_INDEX=3
6
+ export NCCL_IB_TC=162
7
+ export NCCL_IB_TIMEOUT=22
8
+ export NCCL_PXN_DISABLE=0
9
+ export NCCL_IB_QPS_PER_CONNECTION=4
10
+ # export NCCL_ALGO=Ring
11
+ export OMP_NUM_THREADS=1
12
+ export MKL_NUM_THREADS=1
13
+ export NCCL_ALGO=Tree
14
+ REAL_DATASET_DIR=/storage/dataset/train
15
+ EXP_NAME=test_train
16
+ SAMPLE_RATE=1
17
+ NUM_FRAMES=17
18
+ RESOLUTION=720
19
+ SUBSET_SIZE=100
20
+ CKPT=/storage/clh/models/488dim8
21
+
22
+ torchrun \
23
+ --nnodes=1 --nproc_per_node=8 \
24
+ --rdzv_endpoint=localhost:29503 \
25
+ --master_addr=localhost \
26
+ --master_port=29600 \
27
+ scripts/rec_easyanimate_vae.py \
28
+ --batch_size 1 \
29
+ --real_video_dir ${REAL_DATASET_DIR} \
30
+ --generated_video_dir /storage/clh/gen/easyanimate_vae/train \
31
+ --device cuda \
32
+ --sample_fps 24 \
33
+ --sample_rate ${SAMPLE_RATE} \
34
+ --num_frames ${NUM_FRAMES} \
35
+ --resolution ${RESOLUTION} \
36
+ --crop_size ${RESOLUTION} \
37
+ --num_workers 8 \
38
+ --ckpt ${CKPT} \
39
+ --output_origin \
40
+ #--enable_tiling \
scripts/rec_CogVideo_vae.py ADDED
@@ -0,0 +1,315 @@
1
+ import random
2
+ import argparse
3
+ import cv2
4
+ from tqdm import tqdm
5
+ import numpy as np
6
+ import numpy.typing as npt
7
+ import torch
8
+ import torch.distributed as dist
9
+ from torch.nn.parallel import DistributedDataParallel as DDP
10
+ from torch.utils.data import DataLoader, DistributedSampler, Subset
11
+ from decord import VideoReader, cpu
12
+ from torch.nn import functional as F
13
+ from pytorchvideo.transforms import ShortSideScale
14
+ from torchvision.transforms import Lambda, Compose
15
+ from torchvision.transforms._transforms_video import CenterCropVideo
16
+ import sys
17
+ from torch.utils.data import Dataset, DataLoader, Subset
18
+ import os
19
+ import glob
20
+ sys.path.append(".")
21
+ from diffusers import AutoencoderKLCogVideoX
22
+
23
+
24
+ def ddp_setup():
25
+ dist.init_process_group(backend="nccl")
26
+ torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
27
+
28
+ def array_to_video(
29
+ image_array: npt.NDArray, fps: float = 30.0, output_file: str = "output_video.mp4"
30
+ ) -> None:
31
+ height, width, channels = image_array[0].shape
32
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
33
+ video_writer = cv2.VideoWriter(output_file, fourcc, float(fps), (width, height))
34
+ for image in image_array:
35
+ image_rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
36
+ video_writer.write(image_rgb)
37
+
38
+ video_writer.release()
39
+
40
+
41
+ def custom_to_video(
42
+ x: torch.Tensor, fps: float = 2.0, output_file: str = "output_video.mp4"
43
+ ) -> None:
44
+ x = x.detach().cpu()
45
+ x = torch.clamp(x, -1, 1)
46
+ x = (x + 1) / 2
47
+ x = x.permute(1, 2, 3, 0).float().numpy()
48
+ x = (255 * x).astype(np.uint8)
49
+ array_to_video(x, fps=fps, output_file=output_file)
50
+ return
51
+
52
+
53
+ def read_video(video_path: str, num_frames: int, sample_rate: int) -> torch.Tensor:
54
+ decord_vr = VideoReader(video_path, ctx=cpu(0), num_threads=8)
55
+ total_frames = len(decord_vr)
56
+ sample_frames_len = sample_rate * num_frames
57
+
58
+ if total_frames > sample_frames_len:
59
+ s = 0
60
+ e = s + sample_frames_len
61
+ num_frames = num_frames
62
+ else:
63
+ s = 0
64
+ e = total_frames
65
+ num_frames = int(total_frames / sample_frames_len * num_frames)
66
+ print(
67
+ f"sample_frames_len {sample_frames_len}, only can sample {num_frames * sample_rate}",
68
+ video_path,
69
+ total_frames,
70
+ )
71
+
72
+ frame_id_list = np.linspace(s, e - 1, num_frames, dtype=int)
73
+ video_data = decord_vr.get_batch(frame_id_list).asnumpy()
74
+ video_data = torch.from_numpy(video_data)
75
+ video_data = video_data.permute(3, 0, 1, 2) # (T, H, W, C) -> (C, T, H, W)
76
+ return video_data
77
+
78
+
79
+ class RealVideoDataset(Dataset):
80
+ video_exts = ['avi', 'mp4', 'webm']
81
+
82
+ def __init__(
83
+ self,
84
+ real_video_dir,
85
+ num_frames,
86
+ sample_rate=1,
87
+ crop_size=None,
88
+ resolution=128,
89
+ ) -> None:
90
+ super().__init__()
91
+ self.real_video_files = self._combine_without_prefix(real_video_dir)
92
+ self.num_frames = num_frames
93
+ self.sample_rate = sample_rate
94
+ self.crop_size = crop_size
95
+ self.short_size = resolution
96
+
97
+ def __len__(self):
98
+ return len(self.real_video_files)
99
+
100
+ def __getitem__(self, index):
101
+ try:
102
+ if index >= len(self):
103
+ raise IndexError
104
+ real_video_file = self.real_video_files[index]
105
+ real_video_tensor = self._load_video(real_video_file)
106
+ video_name = os.path.basename(real_video_file)
107
+ except:
108
+ if index >= len(self):
109
+ raise IndexError
110
+ real_video_file = self.real_video_files[random.randint(1,index-1)]
111
+ real_video_tensor = self._load_video(real_video_file)
112
+ video_name = os.path.basename(real_video_file)
113
+ return {'video': real_video_tensor, 'file_name': video_name }
114
+
115
+ def _load_video(self, video_path):
116
+ num_frames = self.num_frames
117
+ sample_rate = self.sample_rate
118
+ decord_vr = VideoReader(video_path, ctx=cpu(0))
119
+ total_frames = len(decord_vr)
120
+ sample_frames_len = sample_rate * num_frames
121
+ s = 0
122
+ e = s + sample_frames_len
123
+ num_frames = num_frames
124
+ """
125
+ if total_frames > sample_frames_len:
126
+ s = 0
127
+ e = s + sample_frames_len
128
+ num_frames = num_frames
129
+
130
+ else:
131
+ s = 0
132
+ e = total_frames
133
+ num_frames = int(total_frames / sample_frames_len * num_frames)
134
+ print(
135
+ f"sample_frames_len {sample_frames_len}, only can sample {num_frames * sample_rate}",
136
+ video_path,
137
+ total_frames,
138
+ )
139
+ """
140
+ frame_id_list = np.linspace(s, e - 1, num_frames, dtype=int)
141
+ video_data = decord_vr.get_batch(frame_id_list).asnumpy()
142
+ video_data = torch.from_numpy(video_data)
143
+ video_data = video_data.permute(3, 0, 1, 2)
144
+ return _preprocess(
145
+ video_data, short_size=self.short_size, crop_size=self.crop_size
146
+ )
147
+
148
+ def _combine_without_prefix(self, folder_path):
149
+ samples = []
150
+ samples += sum([glob.glob(os.path.join(folder_path, '**', f'*.{ext}'), recursive=True)
151
+ for ext in self.video_exts], [])
152
+ samples.sort()
153
+ return samples
154
+
155
+ def resize(x, resolution):
156
+ height, width = x.shape[-2:]
157
+
158
+ aspect_ratio = width / height
159
+ if width <= height:
160
+ new_width = resolution
161
+ new_height = int(resolution / aspect_ratio)
162
+ else:
163
+ new_height = resolution
164
+ new_width = int(resolution * aspect_ratio)
165
+ resized_x = F.interpolate(x, size=(new_height, new_width), mode='bilinear', align_corners=True, antialias=True)
166
+ return resized_x
167
+
168
+ def _preprocess(video_data, short_size=128, crop_size=None):
169
+ transform = Compose(
170
+ [
171
+ Lambda(lambda x: ((x / 255.0) * 2 - 1)),
172
+
173
+ Lambda(lambda x: resize(x, short_size)),
174
+ (
175
+ CenterCropVideo(crop_size=crop_size)
176
+ if crop_size is not None
177
+ else Lambda(lambda x: x)
178
+ ),
179
+ ]
180
+
181
+ )
182
+ video_outputs = transform(video_data)
183
+ video_outputs = _format_video_shape(video_outputs)
184
+ return video_outputs
185
+
186
+
187
+ def _format_video_shape(video, time_compress=4, spatial_compress=8):
188
+ time = video.shape[1]
189
+ height = video.shape[2]
190
+ width = video.shape[3]
191
+ new_time = (
192
+ (time - (time - 1) % time_compress)
193
+ if (time - 1) % time_compress != 0
194
+ else time
195
+ )
196
+ new_height = (
197
+ (height - (height) % spatial_compress)
198
+ if height % spatial_compress != 0
199
+ else height
200
+ )
201
+ new_width = (
202
+ (width - (width) % spatial_compress) if width % spatial_compress != 0 else width
203
+ )
204
+ return video[:, :new_time, :new_height, :new_width]
205
+
206
+
207
+ @torch.no_grad()
208
+ def main(args: argparse.Namespace):
209
+ real_video_dir = args.real_video_dir
210
+ generated_video_dir = args.generated_video_dir
211
+ ckpt = args.ckpt
212
+ sample_rate = args.sample_rate
213
+ resolution = args.resolution
214
+ crop_size = args.crop_size
215
+ num_frames = args.num_frames
216
+ sample_rate = args.sample_rate
217
+ device = args.device
218
+ sample_fps = args.sample_fps
219
+ batch_size = args.batch_size
220
+ num_workers = args.num_workers
221
+ subset_size = args.subset_size
222
+
223
+ # always make sure the vae_gen/ subfolder exists
224
+ os.makedirs(os.path.join(generated_video_dir, "vae_gen/"), exist_ok=True)
225
+
226
+ data_type = torch.bfloat16
227
+
228
+ ddp_setup()
229
+ rank = int(os.environ["LOCAL_RANK"])
230
+
231
+ # ---- Load Model ----
232
+ device = args.device
233
+ vqvae = AutoencoderKLCogVideoX.from_pretrained(args.ckpt)
234
+ if args.change_decoder:
235
+ sd = torch.load(args.decoder_dir, map_location="cpu")
236
+
237
+ if "ema_state_dict" in sd and len(sd['ema_state_dict']) > 0 and os.environ.get("NOT_USE_EMA_MODEL", 0) == 0:
238
+ print("Load from ema model!")
239
+ sd = sd["ema_state_dict"]
240
+ sd = {key.replace("module.", ""): value for key, value in sd.items()}
241
+ elif "state_dict" in sd:
242
+ print("Load from normal model!")
243
+ if "gen_model" in sd["state_dict"]:
244
+ sd = sd["state_dict"]["gen_model"]
245
+ else:
246
+ sd = sd["state_dict"]
247
+ vqvae.load_state_dict(sd, strict=False)
248
+ #print(vqvae)
249
+ if args.enable_tiling:
250
+ vqvae.enable_tiling()
251
+ vqvae.tile_overlap_factor = args.tile_overlap_factor
252
+ vqvae = vqvae.to(rank).to(data_type)
253
+ vqvae.eval()
254
+
255
+ # ---- Load Model ----
256
+
257
+ # ---- Prepare Dataset ----
258
+ dataset = RealVideoDataset(
259
+ real_video_dir=real_video_dir,
260
+ num_frames=num_frames,
261
+ sample_rate=sample_rate,
262
+ crop_size=crop_size,
263
+ resolution=resolution,
264
+ )
265
+
266
+ if subset_size:
267
+ indices = range(subset_size)
268
+ dataset = Subset(dataset, indices=indices)
269
+ ddp_sampler = DistributedSampler(dataset)
270
+ dataloader = DataLoader(
271
+ dataset, batch_size=batch_size, sampler=ddp_sampler ,pin_memory=True, num_workers=num_workers
272
+ )
273
+ # ---- Prepare Dataset
274
+ # ---- Inference ----
275
+ for batch in tqdm(dataloader):
276
+ x, file_names = batch['video'], batch['file_name']
277
+
278
+ x = x.to(device=device, dtype=data_type) # b c t h w
279
+ video_recon = vqvae(x)[0]
280
+ for idx, video in enumerate(video_recon):
281
+ output_path = os.path.join(generated_video_dir, "vae_gen/", file_names[idx])
282
+ if args.output_origin:
283
+ os.makedirs(os.path.join(generated_video_dir, "origin/"), exist_ok=True)
284
+ origin_output_path = os.path.join(generated_video_dir, "origin/", file_names[idx])
285
+ custom_to_video(
286
+ x[idx], fps=sample_fps / sample_rate, output_file=origin_output_path
287
+ )
288
+ custom_to_video(
289
+ video, fps=sample_fps / sample_rate, output_file=output_path
290
+ )
291
+
292
+ # ---- Inference ----
293
+
294
+ if __name__ == "__main__":
295
+ parser = argparse.ArgumentParser()
296
+ parser.add_argument("--real_video_dir", type=str, default="")
297
+ parser.add_argument("--generated_video_dir", type=str, default="")
298
+ parser.add_argument("--decoder_dir", type=str, default="")
299
+ parser.add_argument("--ckpt", type=str, default="")
300
+ parser.add_argument("--sample_fps", type=int, default=30)
301
+ parser.add_argument("--resolution", type=int, default=336)
302
+ parser.add_argument("--crop_size", type=int, default=None)
303
+ parser.add_argument("--num_frames", type=int, default=17)
304
+ parser.add_argument("--sample_rate", type=int, default=1)
305
+ parser.add_argument("--batch_size", type=int, default=1)
306
+ parser.add_argument("--num_workers", type=int, default=8)
307
+ parser.add_argument("--subset_size", type=int, default=None)
308
+ parser.add_argument("--tile_overlap_factor", type=float, default=0.25)
309
+ parser.add_argument('--enable_tiling', action='store_true')
310
+ parser.add_argument('--output_origin', action='store_true')
311
+ parser.add_argument('--change_decoder', action='store_true')
312
+ parser.add_argument("--device", type=str, default="cuda")
313
+
314
+ args = parser.parse_args()
315
+ main(args)
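Stripped of the DDP and dataset plumbing, the reconstruction this script performs is a single diffusers forward pass. A minimal sketch, assuming the CogVideoX checkpoint path from the shell script above and a CUDA device with enough memory (the random tensor only stands in for a preprocessed clip):

import torch
from diffusers import AutoencoderKLCogVideoX

vae = AutoencoderKLCogVideoX.from_pretrained("/storage/clh/models/CogVideo")
vae = vae.to("cuda", torch.bfloat16).eval()
with torch.no_grad():
    x = torch.randn(1, 3, 17, 256, 256, device="cuda", dtype=torch.bfloat16)  # b c t h w, values in [-1, 1]
    recon = vae(x)[0]  # same call pattern as video_recon = vqvae(x)[0] in main()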
scripts/rec_easyanimate_vae.py ADDED
@@ -0,0 +1,321 @@
1
+ import random
2
+ import argparse
3
+ import cv2
4
+ from tqdm import tqdm
5
+ import numpy as np
6
+ import numpy.typing as npt
7
+ import torch
8
+ import torch.distributed as dist
9
+ from torch.nn.parallel import DistributedDataParallel as DDP
10
+ from torch.utils.data import DataLoader, DistributedSampler, Subset
11
+ from decord import VideoReader, cpu
12
+ from torch.nn import functional as F
13
+ from pytorchvideo.transforms import ShortSideScale
14
+ from torchvision.transforms import Lambda, Compose
15
+ from torchvision.transforms._transforms_video import CenterCropVideo
16
+ import sys
17
+ from torch.utils.data import Dataset, DataLoader, Subset
18
+ import os
19
+ import glob
20
+ sys.path.append(".")
21
+ from diffusers import (AutoencoderKL, DDIMScheduler,
22
+ DPMSolverMultistepScheduler,
23
+ EulerAncestralDiscreteScheduler, EulerDiscreteScheduler,
24
+ PNDMScheduler)
25
+ from omegaconf import OmegaConf
26
+ from easyanimate.models.autoencoder_magvit import AutoencoderKLMagvit
27
+ from easyanimate.utils.lora_utils import merge_lora, unmerge_lora
28
+ from easyanimate.utils.utils import save_videos_grid, get_image_to_video_latent
29
+
30
+ def ddp_setup():
31
+ dist.init_process_group(backend="nccl")
32
+ torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
33
+
34
+ def array_to_video(
35
+ image_array: npt.NDArray, fps: float = 30.0, output_file: str = "output_video.mp4"
36
+ ) -> None:
37
+ height, width, channels = image_array[0].shape
38
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
39
+ video_writer = cv2.VideoWriter(output_file, fourcc, float(fps), (width, height))
40
+ for image in image_array:
41
+ image_rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
42
+ video_writer.write(image_rgb)
43
+
44
+ video_writer.release()
45
+
46
+
47
+ def custom_to_video(
48
+ x: torch.Tensor, fps: float = 2.0, output_file: str = "output_video.mp4"
49
+ ) -> None:
50
+ x = x.detach().cpu()
51
+ x = torch.clamp(x, -1, 1)
52
+ x = (x + 1) / 2
53
+ x = x.permute(1, 2, 3, 0).float().numpy()
54
+ x = (255 * x).astype(np.uint8)
55
+ array_to_video(x, fps=fps, output_file=output_file)
56
+ return
57
+
58
+
59
+ def read_video(video_path: str, num_frames: int, sample_rate: int) -> torch.Tensor:
60
+ decord_vr = VideoReader(video_path, ctx=cpu(0), num_threads=8)
61
+ total_frames = len(decord_vr)
62
+ sample_frames_len = sample_rate * num_frames
63
+
64
+ if total_frames > sample_frames_len:
65
+ s = 0
66
+ e = s + sample_frames_len
67
+ num_frames = num_frames
68
+ else:
69
+ s = 0
70
+ e = total_frames
71
+ num_frames = int(total_frames / sample_frames_len * num_frames)
72
+ print(
73
+ f"sample_frames_len {sample_frames_len}, only can sample {num_frames * sample_rate}",
74
+ video_path,
75
+ total_frames,
76
+ )
77
+
78
+ frame_id_list = np.linspace(s, e - 1, num_frames, dtype=int)
79
+ video_data = decord_vr.get_batch(frame_id_list).asnumpy()
80
+ video_data = torch.from_numpy(video_data)
81
+ video_data = video_data.permute(3, 0, 1, 2) # (T, H, W, C) -> (C, T, H, W)
82
+ return video_data
83
+
84
+
85
+ class RealVideoDataset(Dataset):
86
+ video_exts = ['avi', 'mp4', 'webm']
87
+
88
+ def __init__(
89
+ self,
90
+ real_video_dir,
91
+ num_frames,
92
+ sample_rate=1,
93
+ crop_size=None,
94
+ resolution=128,
95
+ ) -> None:
96
+ super().__init__()
97
+ self.real_video_files = self._combine_without_prefix(real_video_dir)
98
+ self.num_frames = num_frames
99
+ self.sample_rate = sample_rate
100
+ self.crop_size = crop_size
101
+ self.short_size = resolution
102
+
103
+ def __len__(self):
104
+ return len(self.real_video_files)
105
+
106
+ def __getitem__(self, index):
107
+ try:
108
+ if index >= len(self):
109
+ raise IndexError
110
+ real_video_file = self.real_video_files[index]
111
+ real_video_tensor = self._load_video(real_video_file)
112
+ video_name = os.path.basename(real_video_file)
113
+ except:
114
+ if index >= len(self):
115
+ raise IndexError
116
+ real_video_file = self.real_video_files[random.randint(1,index-1)]
117
+ real_video_tensor = self._load_video(real_video_file)
118
+ video_name = os.path.basename(real_video_file)
119
+ return {'video': real_video_tensor, 'file_name': video_name }
120
+
121
+ def _load_video(self, video_path):
122
+ num_frames = self.num_frames
123
+ sample_rate = self.sample_rate
124
+ decord_vr = VideoReader(video_path, ctx=cpu(0))
125
+ total_frames = len(decord_vr)
126
+ sample_frames_len = sample_rate * num_frames
127
+ s = 0
128
+ e = s + sample_frames_len
129
+ num_frames = num_frames
130
+ """
131
+ if total_frames > sample_frames_len:
132
+ s = 0
133
+ e = s + sample_frames_len
134
+ num_frames = num_frames
135
+
136
+ else:
137
+ s = 0
138
+ e = total_frames
139
+ num_frames = int(total_frames / sample_frames_len * num_frames)
140
+ print(
141
+ f"sample_frames_len {sample_frames_len}, only can sample {num_frames * sample_rate}",
142
+ video_path,
143
+ total_frames,
144
+ )
145
+ """
146
+ frame_id_list = np.linspace(s, e - 1, num_frames, dtype=int)
147
+ video_data = decord_vr.get_batch(frame_id_list).asnumpy()
148
+ video_data = torch.from_numpy(video_data)
149
+ video_data = video_data.permute(3, 0, 1, 2)
150
+ return _preprocess(
151
+ video_data, short_size=self.short_size, crop_size=self.crop_size
152
+ )
153
+
154
+ def _combine_without_prefix(self, folder_path):
155
+ samples = []
156
+ samples += sum([glob.glob(os.path.join(folder_path, '**', f'*.{ext}'), recursive=True)
157
+ for ext in self.video_exts], [])
158
+ samples.sort()
159
+ return samples
160
+
161
+ def resize(x, resolution):
162
+ height, width = x.shape[-2:]
163
+
164
+ aspect_ratio = width / height
165
+ if width <= height:
166
+ new_width = resolution
167
+ new_height = int(resolution / aspect_ratio)
168
+ else:
169
+ new_height = resolution
170
+ new_width = int(resolution * aspect_ratio)
171
+ resized_x = F.interpolate(x, size=(new_height, new_width), mode='bilinear', align_corners=True, antialias=True)
172
+ return resized_x
173
+
174
+ def _preprocess(video_data, short_size=128, crop_size=None):
175
+ transform = Compose(
176
+ [
177
+ Lambda(lambda x: ((x / 255.0) * 2 - 1)),
178
+
179
+ Lambda(lambda x: resize(x, short_size)),
180
+ (
181
+ CenterCropVideo(crop_size=crop_size)
182
+ if crop_size is not None
183
+ else Lambda(lambda x: x)
184
+ ),
185
+ ]
186
+
187
+ )
188
+ video_outputs = transform(video_data)
189
+ video_outputs = _format_video_shape(video_outputs)
190
+ return video_outputs
191
+
192
+
193
+ def _format_video_shape(video, time_compress=4, spatial_compress=8):
194
+ time = video.shape[1]
195
+ height = video.shape[2]
196
+ width = video.shape[3]
197
+ new_time = (
198
+ (time - (time - 1) % time_compress)
199
+ if (time - 1) % time_compress != 0
200
+ else time
201
+ )
202
+ new_height = (
203
+ (height - (height) % spatial_compress)
204
+ if height % spatial_compress != 0
205
+ else height
206
+ )
207
+ new_width = (
208
+ (width - (width) % spatial_compress) if width % spatial_compress != 0 else width
209
+ )
210
+ return video[:, :new_time, :new_height, :new_width]
211
+
212
+
213
+ @torch.no_grad()
214
+ def main(args: argparse.Namespace):
215
+ real_video_dir = args.real_video_dir
216
+ generated_video_dir = args.generated_video_dir
217
+ ckpt = args.ckpt
218
+ sample_rate = args.sample_rate
219
+ resolution = args.resolution
220
+ crop_size = args.crop_size
221
+ num_frames = args.num_frames
222
+ sample_rate = args.sample_rate
223
+ device = args.device
224
+ sample_fps = args.sample_fps
225
+ batch_size = args.batch_size
226
+ num_workers = args.num_workers
227
+ subset_size = args.subset_size
228
+
229
+ # always make sure the vae_gen/ subfolder exists
230
+ os.makedirs(os.path.join(generated_video_dir, "vae_gen/"), exist_ok=True)
231
+
232
+ data_type = torch.bfloat16
233
+
234
+ ddp_setup()
235
+ rank = int(os.environ["LOCAL_RANK"])
236
+
237
+ # ---- Load Model ----
238
+ device = args.device
239
+ config_path = "/storage/clh/Causal-Video-VAE/easyanimate/easyanimate_video_slicevae_motion_module_v3.yaml"
240
+ config = OmegaConf.load(config_path)
241
+ model_name = '/storage/clh/EasyAnimate/models/Diffusion_Transformer/EasyAnimateV3-XL-2-InP-512x512'
242
+ if OmegaConf.to_container(config['vae_kwargs'])['enable_magvit']:
243
+ Choosen_AutoencoderKL = AutoencoderKLMagvit
244
+ else:
245
+ Choosen_AutoencoderKL = AutoencoderKL
246
+ vqvae = Choosen_AutoencoderKL.from_pretrained(
247
+ model_name,
248
+ subfolder="vae",
249
+ )
250
+
251
+ #print(vqvae)
252
+ if args.enable_tiling:
253
+ vqvae.enable_tiling()
254
+ vqvae.tile_overlap_factor = args.tile_overlap_factor
255
+ vqvae = vqvae.to(rank).to(data_type)
256
+ vqvae.eval()
257
+
258
+ # ---- Load Model ----
259
+
260
+ # ---- Prepare Dataset ----
261
+ dataset = RealVideoDataset(
262
+ real_video_dir=real_video_dir,
263
+ num_frames=num_frames,
264
+ sample_rate=sample_rate,
265
+ crop_size=crop_size,
266
+ resolution=resolution,
267
+ )
268
+
269
+ if subset_size:
270
+ indices = range(subset_size)
271
+ dataset = Subset(dataset, indices=indices)
272
+ ddp_sampler = DistributedSampler(dataset)
273
+ dataloader = DataLoader(
274
+ dataset, batch_size=batch_size, sampler=ddp_sampler ,pin_memory=True, num_workers=num_workers
275
+ )
276
+ # ---- Prepare Dataset
277
+ # ---- Inference ----
278
+ for batch in tqdm(dataloader):
279
+ x, file_names = batch['video'], batch['file_name']
280
+
281
+ x = x.to(device=device, dtype=data_type) # b c t h w
282
+ latents = vqvae.encode(x).latent_dist.sample().to(data_type)
283
+ print(latents.shape)
284
+ video_recon = vqvae.decode(latents)[0]
285
+ for idx, video in enumerate(video_recon):
286
+ output_path = os.path.join(generated_video_dir, "vae_gen/", file_names[idx])
287
+ if args.output_origin:
288
+ os.makedirs(os.path.join(generated_video_dir, "origin/"), exist_ok=True)
289
+ origin_output_path = os.path.join(generated_video_dir, "origin/", file_names[idx])
290
+ custom_to_video(
291
+ x[idx], fps=sample_fps / sample_rate, output_file=origin_output_path
292
+ )
293
+ custom_to_video(
294
+ video, fps=sample_fps / sample_rate, output_file=output_path
295
+ )
296
+
297
+ # ---- Inference ----
298
+
299
+ if __name__ == "__main__":
300
+ parser = argparse.ArgumentParser()
301
+ parser.add_argument("--real_video_dir", type=str, default="")
302
+ parser.add_argument("--generated_video_dir", type=str, default="")
303
+ parser.add_argument("--decoder_dir", type=str, default="")
304
+ parser.add_argument("--ckpt", type=str, default="")
305
+ parser.add_argument("--sample_fps", type=int, default=30)
306
+ parser.add_argument("--resolution", type=int, default=336)
307
+ parser.add_argument("--crop_size", type=int, default=None)
308
+ parser.add_argument("--num_frames", type=int, default=17)
309
+ parser.add_argument("--sample_rate", type=int, default=1)
310
+ parser.add_argument("--batch_size", type=int, default=1)
311
+ parser.add_argument("--num_workers", type=int, default=8)
312
+ parser.add_argument("--subset_size", type=int, default=None)
313
+ parser.add_argument("--tile_overlap_factor", type=float, default=0.25)
314
+ parser.add_argument('--enable_tiling', action='store_true')
315
+ parser.add_argument('--output_origin', action='store_true')
316
+ parser.add_argument('--change_decoder', action='store_true')
317
+ parser.add_argument("--device", type=str, default="cuda")
318
+
319
+ args = parser.parse_args()
320
+ main(args)
321
+
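The encode/decode pair above follows the standard diffusers autoencoder contract (encode(...).latent_dist.sample() followed by decode(...)). For reference, the same round trip with a stock image AutoencoderKL; the model id here is only an illustrative placeholder, not the checkpoint this script loads:

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").eval()
with torch.no_grad():
    img = torch.randn(1, 3, 256, 256)               # b c h w, values in [-1, 1]
    latents = vae.encode(img).latent_dist.sample()  # sample from the encoder posterior
    recon = vae.decode(latents).sample              # back to pixel space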
scripts/rec_vqgan_vae.py ADDED
@@ -0,0 +1,318 @@
1
+ import random
2
+ import argparse
3
+ import cv2
4
+ from tqdm import tqdm
5
+ import numpy as np
6
+ import numpy.typing as npt
7
+ import torch
8
+ import torch.distributed as dist
9
+ from torch.nn.parallel import DistributedDataParallel as DDP
10
+ from torch.utils.data import DataLoader, DistributedSampler, Subset
11
+ from decord import VideoReader, cpu
12
+ from torch.nn import functional as F
13
+ from pytorchvideo.transforms import ShortSideScale
14
+ from torchvision.transforms import Lambda, Compose
15
+ from torchvision.transforms._transforms_video import CenterCropVideo
16
+ import sys
17
+ from torch.utils.data import Dataset, DataLoader, Subset
18
+ import os
19
+ import glob
20
+ sys.path.append(".")
21
+ import torch.nn as nn
22
+ import yaml
23
+ from omegaconf import OmegaConf
24
+ from einops import rearrange
25
+ from taming.models.vqgan import VQModel, GumbelVQ
26
+
27
+ def ddp_setup():
28
+ dist.init_process_group(backend="nccl")
29
+ torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
30
+
31
+ def array_to_video(
32
+ image_array: npt.NDArray, fps: float = 30.0, output_file: str = "output_video.mp4"
33
+ ) -> None:
34
+ height, width, channels = image_array[0].shape
35
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
36
+ video_writer = cv2.VideoWriter(output_file, fourcc, float(fps), (width, height))
37
+
38
+ for image in image_array:
39
+ image_rgb = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
40
+ video_writer.write(image_rgb)
41
+
42
+ video_writer.release()
43
+
44
+
45
+ def custom_to_video(
46
+ x: torch.Tensor, fps: float = 2.0, output_file: str = "output_video.mp4"
47
+ ) -> None:
48
+ x = x.detach().cpu()
49
+ x = torch.clamp(x, -1, 1)
50
+ x = (x + 1) / 2
51
+ x = x.permute(1, 2, 3, 0).float().numpy()
52
+ x = (255 * x).astype(np.uint8)
53
+ array_to_video(x, fps=fps, output_file=output_file)
54
+ return
55
+
56
+
57
+ def read_video(video_path: str, num_frames: int, sample_rate: int) -> torch.Tensor:
58
+ decord_vr = VideoReader(video_path, ctx=cpu(0), num_threads=8)
59
+ total_frames = len(decord_vr)
60
+ sample_frames_len = sample_rate * num_frames
61
+
62
+ if total_frames > sample_frames_len:
63
+ s = 0
64
+ e = s + sample_frames_len
65
+ num_frames = num_frames
66
+ else:
67
+ s = 0
68
+ e = total_frames
69
+ num_frames = int(total_frames / sample_frames_len * num_frames)
70
+ print(
71
+ f"sample_frames_len {sample_frames_len}, only can sample {num_frames * sample_rate}",
72
+ video_path,
73
+ total_frames,
74
+ )
75
+
76
+ frame_id_list = np.linspace(s, e - 1, num_frames, dtype=int)
77
+ video_data = decord_vr.get_batch(frame_id_list).asnumpy()
78
+ video_data = torch.from_numpy(video_data)
79
+ video_data = video_data.permute(3, 0, 1, 2) # (T, H, W, C) -> (C, T, H, W)
80
+ return video_data
81
+
82
+
83
+ class RealVideoDataset(Dataset):
84
+ video_exts = ['avi', 'mp4', 'webm']
85
+
86
+ def __init__(
87
+ self,
88
+ real_video_dir,
89
+ num_frames,
90
+ sample_rate=1,
91
+ crop_size=None,
92
+ resolution=128,
93
+ ) -> None:
94
+ super().__init__()
95
+ self.real_video_files = self._combine_without_prefix(real_video_dir)
96
+ self.num_frames = num_frames
97
+ self.sample_rate = sample_rate
98
+ self.crop_size = crop_size
99
+ self.short_size = resolution
100
+
101
+ def __len__(self):
102
+ return len(self.real_video_files)
103
+
104
+ def __getitem__(self, index):
105
+ try:
106
+ if index >= len(self):
107
+ raise IndexError
108
+ real_video_file = self.real_video_files[index]
109
+ real_video_tensor = self._load_video(real_video_file)
110
+ video_name = os.path.basename(real_video_file)
111
+ except Exception:  # decoding failed; fall back to another clip
112
+ if index >= len(self):
113
+ raise IndexError
114
+ real_video_file = self.real_video_files[random.randint(0, len(self.real_video_files) - 1)]
115
+ real_video_tensor = self._load_video(real_video_file)
116
+ video_name = os.path.basename(real_video_file)
117
+ return {'video': real_video_tensor, 'file_name': video_name }
118
+
119
+ def _load_video(self, video_path):
120
+ num_frames = self.num_frames
121
+ sample_rate = self.sample_rate
122
+ decord_vr = VideoReader(video_path, ctx=cpu(0))
123
+ total_frames = len(decord_vr)
124
+ sample_frames_len = sample_rate * num_frames
125
+ s = 0
126
+ e = s + sample_frames_len
127
+ num_frames = num_frames
128
+ """
129
+ if total_frames > sample_frames_len:
130
+ s = 0
131
+ e = s + sample_frames_len
132
+ num_frames = num_frames
133
+
134
+ else:
135
+ s = 0
136
+ e = total_frames
137
+ num_frames = int(total_frames / sample_frames_len * num_frames)
138
+ print(
139
+ f"sample_frames_len {sample_frames_len}, only can sample {num_frames * sample_rate}",
140
+ video_path,
141
+ total_frames,
142
+ )
143
+ """
144
+ frame_id_list = np.linspace(s, e - 1, num_frames, dtype=int)
145
+ video_data = decord_vr.get_batch(frame_id_list).asnumpy()
146
+ video_data = torch.from_numpy(video_data)
147
+ video_data = video_data.permute(3, 0, 1, 2)
148
+ return _preprocess(
149
+ video_data, short_size=self.short_size, crop_size=self.crop_size
150
+ )
151
+
152
+ def _combine_without_prefix(self, folder_path):
153
+ samples = []
154
+ samples += sum([glob.glob(os.path.join(folder_path, '**', f'*.{ext}'), recursive=True)
155
+ for ext in self.video_exts], [])
156
+ samples.sort()
157
+ return samples
158
+
159
+ def resize(x, resolution):
160
+ height, width = x.shape[-2:]
161
+ aspect_ratio = width / height
162
+ if width <= height:
163
+ new_width = resolution
164
+ new_height = int(resolution / aspect_ratio)
165
+ else:
166
+ new_height = resolution
167
+ new_width = int(resolution * aspect_ratio)
168
+ resized_x = F.interpolate(x, size=(new_height, new_width), mode='bilinear', align_corners=True, antialias=True)
169
+ return resized_x
170
+
171
+ def _preprocess(video_data, short_size=128, crop_size=None):
172
+ transform = Compose(
173
+
174
+ [
175
+
176
+ Lambda(lambda x: ((x / 255.0) * 2 - 1)),
177
+ Lambda(lambda x: resize(x, short_size)),
178
+ (
179
+ CenterCropVideo(crop_size=crop_size)
180
+ if crop_size is not None
181
+ else Lambda(lambda x: x)
182
+ ),
183
+
184
+ ]
185
+
186
+ )
187
+ video_outputs = transform(video_data)
188
+ video_outputs = _format_video_shape(video_outputs)
189
+ return video_outputs
190
+
191
+
192
+ def _format_video_shape(video, time_compress=4, spatial_compress=8):
193
+ time = video.shape[1]
194
+ height = video.shape[2]
195
+ width = video.shape[3]
196
+ new_time = (
197
+ (time - (time - 1) % time_compress)
198
+ if (time - 1) % time_compress != 0
199
+ else time
200
+ )
201
+ new_height = (
202
+ (height - (height) % spatial_compress)
203
+ if height % spatial_compress != 0
204
+ else height
205
+ )
206
+ new_width = (
207
+ (width - (width) % spatial_compress) if width % spatial_compress != 0 else width
208
+ )
209
+ return video[:, :new_time, :new_height, :new_width]
210
+
211
+ def load_config(config_path, display=False):
212
+ config = OmegaConf.load(config_path)
213
+ if display:
214
+ print(yaml.dump(OmegaConf.to_container(config)))
215
+ return config
216
+
217
+ def load_vqgan(config, ckpt_path=None, is_gumbel=False):
218
+ if is_gumbel:
219
+ model = GumbelVQ(**config.model.params)
220
+ else:
221
+ model = VQModel(**config.model.params)
222
+ if ckpt_path is not None:
223
+ sd = torch.load(ckpt_path, map_location="cpu")["state_dict"]
224
+ missing, unexpected = model.load_state_dict(sd, strict=False)
225
+ return model.eval()
226
+
227
+ @torch.no_grad()
228
+ def main(args: argparse.Namespace):
229
+ real_video_dir = args.real_video_dir
230
+ generated_video_dir = args.generated_video_dir
231
+ ckpt = args.ckpt
232
+ config = args.config
233
+ sample_rate = args.sample_rate
234
+ resolution = args.resolution
235
+ crop_size = args.crop_size
236
+ num_frames = args.num_frames
237
+ sample_rate = args.sample_rate
238
+ sample_fps = args.sample_fps
239
+ batch_size = args.batch_size
240
+ num_workers = args.num_workers
241
+ subset_size = args.subset_size
242
+
243
+ if not os.path.exists(args.generated_video_dir):
244
+ os.makedirs(os.path.join(generated_video_dir, "vae_gen/"), exist_ok=True)
245
+
246
+ data_type = torch.bfloat16
247
+
248
+ ddp_setup()
249
+ rank = int(os.environ["LOCAL_RANK"])
250
+
251
+ # ---- Load Model ----
252
+ vqgan_config = load_config(config, display=False)
253
+ vqgan = load_vqgan(vqgan_config, ckpt_path=ckpt, is_gumbel=True)
254
+ print(vqgan)
255
+ vqgan = vqgan.to(rank).to(data_type)
256
+ vqgan.eval()
257
+ # ---- Load Model ----
258
+
259
+ # ---- Prepare Dataset ----
260
+ dataset = RealVideoDataset(
261
+ real_video_dir=real_video_dir,
262
+ num_frames=num_frames,
263
+ sample_rate=sample_rate,
264
+ crop_size=crop_size,
265
+ resolution=resolution,
266
+ )
267
+
268
+ if subset_size:
269
+ indices = range(subset_size)
270
+ dataset = Subset(dataset, indices=indices)
271
+ ddp_sampler = DistributedSampler(dataset)
272
+ dataloader = DataLoader(
273
+ dataset, batch_size=batch_size, sampler=ddp_sampler ,pin_memory=True, num_workers=num_workers
274
+ )
275
+ # ---- Prepare Dataset
276
+
277
+ # ---- Inference ----
278
+ for batch in tqdm(dataloader):
279
+ x, file_names = batch['video'], batch['file_name']
280
+
281
+ x = x.to(rank).to(data_type) # b c t h w
282
+ t = x.shape[2]
283
+ x = rearrange(x, "b c t h w -> (b t) c h w", t=t)
284
+ latents, _, [_, _, indices] = vqgan.encode(x)
285
+ video_recon = vqgan.decode(latents.to(data_type))
286
+ video_recon = rearrange(video_recon, "(b t) c h w -> b c t h w", t=t)
287
+ x = rearrange(x, "(b t) c h w -> b c t h w", t=t)
288
+ for idx, video in enumerate(video_recon):
289
+ output_path = os.path.join(generated_video_dir, "vae_gen/", file_names[idx])
290
+ if args.output_origin:
291
+ os.makedirs(os.path.join(generated_video_dir, "origin/"), exist_ok=True)
292
+ origin_output_path = os.path.join(generated_video_dir, "origin/", file_names[idx])
293
+ custom_to_video(
294
+ x[idx], fps=sample_fps / sample_rate, output_file=origin_output_path
295
+ )
296
+ custom_to_video(
297
+ video, fps=sample_fps / sample_rate, output_file=output_path
298
+ )
299
+ # ---- Inference ----
300
+
301
+ if __name__ == "__main__":
302
+ parser = argparse.ArgumentParser()
303
+ parser.add_argument("--real_video_dir", type=str, default="")
304
+ parser.add_argument("--generated_video_dir", type=str, default="")
305
+ parser.add_argument("--ckpt", type=str, default="")
306
+ parser.add_argument("--sample_fps", type=int, default=30)
307
+ parser.add_argument("--resolution", type=int, default=336)
308
+ parser.add_argument("--crop_size", type=int, default=None)
309
+ parser.add_argument("--num_frames", type=int, default=17)
310
+ parser.add_argument("--sample_rate", type=int, default=1)
311
+ parser.add_argument("--batch_size", type=int, default=1)
312
+ parser.add_argument("--num_workers", type=int, default=8)
313
+ parser.add_argument("--subset_size", type=int, default=None)
314
+ parser.add_argument('--output_origin', action='store_true')
315
+ parser.add_argument("--config", type=str, default="")
316
+
317
+ args = parser.parse_args()
318
+ main(args)
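Because the taming VQGAN is a 2-D image model, the script above folds the time axis into the batch before encoding and unfolds it afterwards. A self-contained sketch of that reshaping round trip (shapes are arbitrary):

import torch
from einops import rearrange

x = torch.randn(2, 3, 8, 64, 64)                    # b c t h w
frames = rearrange(x, "b c t h w -> (b t) c h w")   # 16 frames for the image VQGAN
video = rearrange(frames, "(b t) c h w -> b c t h w", t=8)
assert torch.equal(x, video)                        # the reshape round trip is lossless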
scripts/refine_video.sh ADDED
@@ -0,0 +1,21 @@
1
+ export CUDA_VISIBLE_DEVICES=1
2
+ VAE_DATASET_DIR=/remote-home1/clh/test/vae
3
+ EXP_NAME=test_train
4
+ SAMPLE_RATE=1
5
+ NUM_FRAMES=65
6
+ RESOLUTION=512
7
+ SUBSET_SIZE=1
8
+ CKPT=/remote-home1/clh/Causal-Video-VAE/results/refiner-h64-122-Res6-fjunresizefromistock150k-lr1.00e-05-bs1-rs256-sr1-fr23
9
+
10
+ python scripts/refine_video.py \
11
+ --batch_size 1 \
12
+ --real_video_dir ${VAE_DATASET_DIR} \
13
+ --generated_video_dir /remote-home1/clh/test/refiner-h64-122-Res6-fjunres\
14
+ --device cuda \
15
+ --sample_fps 24 \
16
+ --sample_rate ${SAMPLE_RATE} \
17
+ --num_frames ${NUM_FRAMES} \
18
+ --resolution ${RESOLUTION} \
19
+ --crop_size ${RESOLUTION} \
20
+ --num_workers 8 \
21
+ --ckpt ${CKPT}
scripts/sd2_1_gen_video.sh ADDED
@@ -0,0 +1,30 @@
1
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
2
+ export NCCL_DEBUG=INFO
3
+ export NCCL_SOCKET_IFNAME=ibs11
4
+ export NCCL_IB_DISABLE=1
5
+ REAL_DATASET_DIR=/remote-home1/clh/dataset/panda70m_val
6
+ EXP_NAME=test_train
7
+ SAMPLE_RATE=1
8
+ NUM_FRAMES=33
9
+ RESOLUTION=256
10
+ SUBSET_SIZE=100
11
+ CKPT=/remote-home1/clh/sd2_1
12
+
13
+ torchrun \
14
+ --nnodes=1 --nproc_per_node=8 \
15
+ --rdzv_backend=c10d \
16
+ --rdzv_endpoint=localhost:29508 \
17
+ --master_addr=localhost \
18
+ --master_port=29600 \
19
+ scripts/rec_sd2_1_vae.py \
20
+ --batch_size 1 \
21
+ --real_video_dir ${REAL_DATASET_DIR} \
22
+ --generated_video_dir /remote-home1/clh/gen/sd2_1/panda70m \
23
+ --sample_fps 24 \
24
+ --sample_rate ${SAMPLE_RATE} \
25
+ --num_frames ${NUM_FRAMES} \
26
+ --resolution ${RESOLUTION} \
27
+ --crop_size ${RESOLUTION} \
28
+ --num_workers 8 \
29
+ --ckpt ${CKPT} \
30
+ --output_origin \
scripts/svd_gen_video.sh ADDED
@@ -0,0 +1,30 @@
 
1
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
2
+ export NCCL_DEBUG=INFO
3
+ export NCCL_SOCKET_IFNAME=ibs11
4
+ export NCCL_IB_DISABLE=1
5
+ REAL_DATASET_DIR=/remote-home1/clh/dataset/panda70m_val
6
+ EXP_NAME=test_train
7
+ SAMPLE_RATE=1
8
+ NUM_FRAMES=33
9
+ RESOLUTION=256
10
+ SUBSET_SIZE=100
11
+ CKPT=/remote-home1/clh/svd
12
+
13
+ torchrun \
14
+ --nnodes=1 --nproc_per_node=8 \
15
+ --rdzv_backend=c10d \
16
+ --rdzv_endpoint=localhost:29509 \
17
+ --master_addr=localhost \
18
+ --master_port=29600 \
19
+ scripts/rec_svd_vae.py \
20
+ --batch_size 8 \
21
+ --real_video_dir ${REAL_DATASET_DIR} \
22
+ --generated_video_dir /remote-home1/clh/gen/svd/panda70m \
23
+ --sample_fps 24 \
24
+ --sample_rate ${SAMPLE_RATE} \
25
+ --num_frames ${NUM_FRAMES} \
26
+ --resolution ${RESOLUTION} \
27
+ --crop_size ${RESOLUTION} \
28
+ --num_workers 8 \
29
+ --ckpt ${CKPT} \
30
+ --output_origin \
scripts/tats_gen_video.sh ADDED
@@ -0,0 +1,30 @@
 
1
+ export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
2
+ export NCCL_DEBUG=INFO
3
+ export NCCL_SOCKET_IFNAME=ibs11
4
+ export NCCL_IB_DISABLE=1
5
+ REAL_DATASET_DIR=/remote-home1/clh/dataset/panda70m_val
6
+ EXP_NAME=test_train
7
+ SAMPLE_RATE=1
8
+ NUM_FRAMES=32
9
+ RESOLUTION=256
10
+ SUBSET_SIZE=100
11
+ CKPT=/remote-home1/clh/TATS/vqgan_sky_128_488_epoch_12-step_29999-train.ckpt
12
+
13
+ torchrun \
14
+ --nnodes=1 --nproc_per_node=8 \
15
+ --rdzv_backend=c10d \
16
+ --rdzv_endpoint=localhost:29501 \
17
+ --master_addr=localhost \
18
+ --master_port=29600 \
19
+ scripts/rec_TATS_vae.py \
20
+ --batch_size 1 \
21
+ --real_video_dir ${REAL_DATASET_DIR} \
22
+ --generated_video_dir /remote-home1/clh/gen/TATS/panda70m \
23
+ --sample_fps 24 \
24
+ --sample_rate ${SAMPLE_RATE} \
25
+ --num_frames ${NUM_FRAMES} \
26
+ --resolution ${RESOLUTION} \
27
+ --crop_size ${RESOLUTION} \
28
+ --num_workers 8 \
29
+ --ckpt ${CKPT} \
30
+ --output_origin \
scripts/train_ddp.sh ADDED
@@ -0,0 +1,42 @@
1
+ export https_proxy=127.0.0.1:7890
2
+ export http_proxy=127.0.0.1:7890
3
+ export GLOO_SOCKET_IFNAME=bond0
4
+ export NCCL_SOCKET_IFNAME=bond0
5
+ export NCCL_IB_HCA=mlx5_10:1,mlx5_11:1,mlx5_12:1,mlx5_13:1
6
+ export NCCL_IB_GID_INDEX=3
7
+ export NCCL_IB_TC=162
8
+ export NCCL_IB_TIMEOUT=22
9
+ export NCCL_PXN_DISABLE=0
10
+ export NCCL_IB_QPS_PER_CONNECTION=4
11
+ # export NCCL_ALGO=Ring
12
+ export OMP_NUM_THREADS=1
13
+ export MKL_NUM_THREADS=1
14
+ export NCCL_ALGO=Tree
15
+ EXP_NAME=464641024layerNorm-reusuem1
16
+
17
+ torchrun \
18
+ --nnodes=1 --nproc_per_node=6 \
19
+ --rdzv_endpoint=localhost:29503 \
20
+ --master_addr=localhost \
21
+ --master_port=29600 \
22
+ train_ddp.py \
23
+ --exp_name ${EXP_NAME} \
24
+ --pretrained_model_name_or_path /storage/clh/Causal-Video-VAE/results/464641024layerNorm-lr1.00e-05-bs1-rs256-sr2-fr25/r\
25
+ --video_path /storage/dataset/pexels \
26
+ --eval_video_path /storage/dataset/vae_eval/webvid \
27
+ --resolution 256 \
28
+ --num_frames 25 \
29
+ --batch_size 1 \
30
+ --disc_start 5000 \
31
+ --save_ckpt_step 1000 \
32
+ --eval_steps 1000 \
33
+ --eval_batch_size 1 \
34
+ --eval_num_frames 33 \
35
+ --eval_sample_rate 1 \
36
+ --eval_subset_size 100 \
37
+ --eval_lpips \
38
+ --ema \
39
+ --ema_decay 0.999 \
40
+ --perceptual_weight 1.0 \
41
+ --loss_type l1 \
42
+ --disc_cls causalvideovae.model.losses.LPIPSWithDiscriminator3D
scripts/train_ddp_refiner.sh ADDED
@@ -0,0 +1,41 @@
1
+ # export https_proxy=http://127.0.0.1:8998
2
+ # export http_proxy=http://127.0.0.1:8998
3
+ unset https_proxy
4
+ unset http_proxy
5
+ export WANDB_PROJECT=causalvideovae
6
+ export CUDA_VISIBLE_DEVICES=6,7
7
+ export NCCL_DEBUG=INFO
8
+ export NCCL_SOCKET_IFNAME=ibs11
9
+ export NCCL_IB_DISABLE=1
10
+
11
+ EXP_NAME=refiner-h64-122-Res6-fjunresizefromistock150k
12
+
13
+ torchrun \
14
+ --nnodes=1 --nproc_per_node=2 \
15
+ --rdzv_backend=c10d \
16
+ --rdzv_endpoint=localhost:29507 \
17
+ --master_addr=localhost \
18
+ --master_port=29600 \
19
+ train_ddp_refiner.py \
20
+ --exp_name ${EXP_NAME} \
21
+ --model_config /remote-home1/clh/config.json \
22
+ --vae_path /remote-home1/clh/models/4_8_8_4_startgan10k_25_sr2_istockfromfjandk400andistock_3DUD_mse/test140k\
23
+ --video_path /remote-home1/dataset/data_split_1024/ \
24
+ --eval_video_path /remote-home1/clh/dataset/webvid/videos \
25
+ --resolution 256 \
26
+ --num_frames 25 \
27
+ --batch_size 1 \
28
+ --sample_rate 1 \
29
+ --disc_start 100000 \
30
+ --save_ckpt_step 10000 \
31
+ --eval_steps 500 \
32
+ --eval_batch_size 1 \
33
+ --eval_num_frames 33 \
34
+ --eval_sample_rate 1 \
35
+ --eval_lpips \
36
+ --eval_subset_size 100 \
37
+ --ema \
38
+ --ema_decay 0.999 \
39
+ --perceptual_weight 1.0 \
40
+ --loss_type l1 \
41
+ --disc_cls causalvideovae.model.losses.LPIPSWithDiscriminator3Drefiner
scripts/vae_demo.sh ADDED
@@ -0,0 +1,15 @@
1
+ export NCCL_DEBUG=INFO
2
+ export NCCL_SOCKET_IFNAME=ibs11
3
+ export NCCL_IB_DISABLE=1
4
+ REAL_DATASET_DIR=/remote-home1/clh/dataset/panda70m_val
5
+ EXP_NAME=test_train
6
+ SAMPLE_RATE=1
7
+ NUM_FRAMES=33
8
+ RESOLUTION=256
9
+ SUBSET_SIZE=100
10
+ CKPT=/storage/clh/Open-Sora/OpenSora-VAE-v1.2
11
+
12
+ python scripts/vae_demo.py\
13
+ --ckpt ${CKPT} \
14
+ --config /storage/clh/Causal-Video-VAE/opensora/video.py\
15
+ --enable_tiling \
test.py ADDED
@@ -0,0 +1,2 @@
1
+ import lpips
2
+ lpips.LPIPS(net='alex', spatial=True)
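test.py only instantiates LPIPS once so that the AlexNet weights are downloaded and cached before any training or evaluation run needs them. A hedged sketch of how the cached metric is then queried (tensor shapes here are arbitrary):

import torch
import lpips

loss_fn = lpips.LPIPS(net='alex')        # reuses the weights cached by test.py
a = torch.rand(1, 3, 64, 64) * 2 - 1     # LPIPS expects inputs in [-1, 1]
b = torch.rand(1, 3, 64, 64) * 2 - 1
print(loss_fn(a, b).item())              # scalar perceptual distance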
train_ddp.py ADDED
@@ -0,0 +1,586 @@
1
+ import os
2
+ import torch
3
+ import torch.distributed as dist
4
+ from torch.nn.parallel import DistributedDataParallel as DDP
5
+ from torch.utils.data import DataLoader, DistributedSampler, Subset
6
+ import argparse
7
+ import logging
8
+ from colorlog import ColoredFormatter
9
+ import tqdm
10
+ from itertools import chain
11
+ import wandb
12
+ import random
13
+ import numpy as np
14
+ from pathlib import Path
15
+ from einops import rearrange
16
+ from causalvideovae.model import CausalVAEModel, EMA
17
+ from causalvideovae.utils.utils import RealVideoDataset
18
+ from causalvideovae.model.dataset_videobase import VideoDataset
19
+ from causalvideovae.model.utils.module_utils import resolve_str_to_obj
20
+ from causalvideovae.model.utils.video_utils import tensor_to_video
21
+ import time
22
+ try:
23
+ import lpips
24
+ except:
25
+ raise Exception("Need lpips to valid.")
26
+
27
+ def set_random_seed(seed):
28
+ random.seed(seed)
29
+ np.random.seed(seed)
30
+ torch.manual_seed(seed)
31
+ torch.cuda.manual_seed(seed)
32
+ torch.cuda.manual_seed_all(seed)
33
+
34
+
35
+ def ddp_setup():
36
+ dist.init_process_group(backend="nccl")
37
+ torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
38
+
39
+
40
+ def setup_logger(rank):
41
+ logger = logging.getLogger()
42
+ logger.setLevel(logging.INFO)
43
+ formatter = ColoredFormatter(
44
+ f"[rank{rank}] %(log_color)s%(asctime)s - %(levelname)s - %(message)s",
45
+ datefmt="%Y-%m-%d %H:%M:%S",
46
+ log_colors={
47
+ "DEBUG": "cyan",
48
+ "INFO": "green",
49
+ "WARNING": "yellow",
50
+ "ERROR": "red",
51
+ "CRITICAL": "bold_red",
52
+ },
53
+ reset=True,
54
+ style="%",
55
+ )
56
+ stream_handler = logging.StreamHandler()
57
+ stream_handler.setLevel(logging.DEBUG)
58
+ stream_handler.setFormatter(formatter)
59
+
60
+ if not logger.handlers:
61
+ logger.addHandler(stream_handler)
62
+
63
+ return logger
64
+
65
+
66
+ def check_unused_params(model):
67
+ unused_params = []
68
+ for name, param in model.named_parameters():
69
+ if param.grad is None:
70
+ unused_params.append(name)
71
+ return unused_params
72
+
73
+
74
+ def set_requires_grad_optimizer(optimizer, requires_grad):
75
+ for param_group in optimizer.param_groups:
76
+ for param in param_group["params"]:
77
+ param.requires_grad = requires_grad
78
+
79
+
80
+ def total_params(model):
81
+ total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
82
+ total_params_in_millions = total_params / 1e6
83
+ return int(total_params_in_millions)
84
+
85
+
86
+ def get_exp_name(args):
87
+ return f"{args.exp_name}-lr{args.lr:.2e}-bs{args.batch_size}-rs{args.resolution}-sr{args.sample_rate}-fr{args.num_frames}"
88
+
89
+ def set_train(modules):
90
+ for module in modules:
91
+ module.train()
92
+
93
+ def set_eval(modules):
94
+ for module in modules:
95
+ module.eval()
96
+
97
+ def set_modules_requires_grad(modules, requires_grad):
98
+ for module in modules:
99
+ module.requires_grad_(requires_grad)
100
+
101
+ def save_checkpoint(
102
+ epoch,
103
+ batch_idx,
104
+ optimizer_state,
105
+ state_dict,
106
+ scaler_state,
107
+ checkpoint_dir,
108
+ filename="checkpoint.ckpt",
109
+ ema_state_dict={}
110
+ ):
111
+ filepath = checkpoint_dir / Path(filename)
112
+ torch.save(
113
+ {
114
+ "epoch": epoch,
115
+ "batch_idx": batch_idx,
116
+ "optimizer_state": optimizer_state,
117
+ "state_dict": state_dict,
118
+ "ema_state_dict": ema_state_dict,
119
+ "scaler_state": scaler_state,
120
+ },
121
+ filepath,
122
+ )
123
+ return filepath
124
+
125
+
126
+ def valid(rank, model, val_dataloader, precision, args):
127
+ if args.eval_lpips:
128
+ lpips_model = lpips.LPIPS(net='alex', spatial=True)
129
+ lpips_model.to(rank)
130
+ lpips_model = DDP(lpips_model, device_ids=[rank])
131
+ lpips_model.requires_grad_(False)
132
+ lpips_model.eval()
133
+
134
+ bar = None
135
+ if rank == 0:
136
+ bar = tqdm.tqdm(total=len(val_dataloader), desc="Validation...")
137
+
138
+ psnr_list = []
139
+ lpips_list = []
140
+ video_log = []
141
+ num_video_log = args.eval_num_video_log
142
+
143
+ with torch.no_grad():
144
+ for batch_idx, batch in enumerate(val_dataloader):
145
+ inputs = batch['video'].to(rank)
146
+ with torch.cuda.amp.autocast(dtype=precision):
147
+ video_recon, _ = model(inputs)
148
+
149
+ # Upload videos
150
+ if rank == 0:
151
+ for i in range(len(video_recon)):
152
+ if num_video_log <= 0:
153
+ break
154
+ video = tensor_to_video(video_recon[i])
155
+ video_log.append(video)
156
+ num_video_log -= 1
157
+
158
+ inputs = (rearrange(inputs, "b c t h w -> (b t) c h w").contiguous()+1)/2.0
159
+ video_recon = (rearrange(video_recon, "b c t h w -> (b t) c h w").contiguous()+1)/2.0
160
+
161
+ # Calculate PSNR
162
+ mse = torch.mean(torch.square(inputs - video_recon), dim=(1,2,3))
163
+ psnr = 20 * torch.log10(1 / torch.sqrt(mse))
164
+ psnr = psnr.mean().detach().cpu().item()
165
+
166
+ # Calculate LPIPS
167
+ if args.eval_lpips:
168
+ lpips_score = lpips_model.forward(inputs, video_recon).mean().detach().cpu().item()
169
+ lpips_list.append(lpips_score)
170
+
171
+ psnr_list.append(psnr)
172
+ if rank == 0:
173
+ bar.update()
174
+ # Release gpus memory
175
+ torch.cuda.empty_cache()
176
+ return psnr_list, lpips_list, video_log
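The PSNR computed above assumes inputs rescaled to [0, 1] (peak value 1), so it reduces to -10 * log10(MSE), since 20 * log10(1 / sqrt(MSE)) = -10 * log10(MSE). A tiny sanity check of that equivalence with a hypothetical MSE value:

import math

mse = 0.001  # hypothetical mean squared error on [0, 1] data
psnr_a = 20 * math.log10(1 / math.sqrt(mse))
psnr_b = -10 * math.log10(mse)
assert abs(psnr_a - psnr_b) < 1e-9  # both give 30 dB for mse = 0.001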
+
+ def gather_valid_result(psnr_list, lpips_list, video_log_list, rank, world_size):
+     gathered_psnr_list = [None for _ in range(world_size)]
+     gathered_lpips_list = [None for _ in range(world_size)]
+     gathered_video_logs = [None for _ in range(world_size)]
+
+     dist.all_gather_object(gathered_psnr_list, psnr_list)
+     dist.all_gather_object(gathered_lpips_list, lpips_list)
+     dist.all_gather_object(gathered_video_logs, video_log_list)
+     return np.array(gathered_psnr_list).mean(), np.array(gathered_lpips_list).mean(), list(chain(*gathered_video_logs))
+
+ def train(args):
+     # Setup logger
+     ddp_setup()
+     rank = int(os.environ["LOCAL_RANK"])
+     logger = setup_logger(rank)
+
+     # Init
+     ckpt_dir = Path(args.ckpt_dir) / Path(get_exp_name(args))
+     if rank == 0:
+         try:
+             ckpt_dir.mkdir(exist_ok=False, parents=True)
+         except FileExistsError:
+             logger.warning(f"`{ckpt_dir}` exists!")
+             time.sleep(5)
+
+         logger.warning("Connecting to WANDB...")
+         wandb.init(
+             project=os.environ.get("WANDB_PROJECT", "causalvideovae"),
+             config=args,
+             name=get_exp_name(args)
+         )
+     dist.barrier()
+
+     # Load generator model
+     if args.pretrained_model_name_or_path is not None:
+         if rank == 0:
+             logger.warning(
+                 f"You are loading a checkpoint from `{args.pretrained_model_name_or_path}`."
+             )
+         model = CausalVAEModel.from_pretrained(
+             args.pretrained_model_name_or_path, ignore_mismatched_sizes=False
+         )
+     elif args.model_config is not None:
+         if rank == 0:
+             logger.warning("Model will be initialized randomly.")
+         model = CausalVAEModel.from_config(args.model_config)
+     else:
+         raise Exception(
+             "You should set either `--pretrained_model_name_or_path` or `--model_config`"
+         )
+
+     # Load discriminator model
+     disc_cls = resolve_str_to_obj(args.disc_cls, append=False)
+     logger.warning(f"disc_class: {args.disc_cls} perceptual_weight: {args.perceptual_weight} loss_type: {args.loss_type}")
+     disc = disc_cls(
+         disc_start=args.disc_start,
+         disc_weight=args.disc_weight,
+         kl_weight=args.kl_weight,
+         logvar_init=args.logvar_init,
+         perceptual_weight=args.perceptual_weight,
+         loss_type=args.loss_type
+     )
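`--disc_cls` is a dotted import path that `resolve_str_to_obj` (from causalvideovae.model.utils.module_utils) turns into the loss/discriminator class at runtime, which is how the shell scripts above select `LPIPSWithDiscriminator3D` versus `LPIPSWithDiscriminator3Drefiner`. A minimal sketch of that kind of dotted-path resolution, under the assumption that it behaves like a plain importlib lookup; the real helper may differ:

import importlib

def resolve_dotted_path(path: str):
    """Hypothetical stand-in for resolve_str_to_obj: 'pkg.module.ClassName' -> class object."""
    module_name, attr_name = path.rsplit(".", 1)
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)

# e.g. resolve_dotted_path("causalvideovae.model.losses.LPIPSWithDiscriminator3D")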
+
+     # DDP
+     model = model.to(rank)
+     model = DDP(
+         model, device_ids=[rank], find_unused_parameters=args.find_unused_parameters
+     )
+     disc = disc.to(rank)
+     disc = DDP(
+         disc, device_ids=[rank], find_unused_parameters=args.find_unused_parameters
+     )
+
+     # Load dataset
+     dataset = VideoDataset(
+         args.video_path,
+         sequence_length=args.num_frames,
+         resolution=args.resolution,
+         sample_rate=args.sample_rate,
+         dynamic_sample=args.dynamic_sample,
+     )
+     ddp_sampler = DistributedSampler(dataset)
+     dataloader = DataLoader(
+         dataset, batch_size=args.batch_size, sampler=ddp_sampler, pin_memory=True, num_workers=args.dataset_num_worker
+     )
+
+     val_dataset = RealVideoDataset(
+         real_video_dir=args.eval_video_path,
+         num_frames=args.eval_num_frames,
+         sample_rate=args.eval_sample_rate,
+         crop_size=args.eval_resolution,
+         resolution=args.eval_resolution,
+     )
+     indices = range(args.eval_subset_size)
+     val_dataset = Subset(val_dataset, indices=indices)
+     val_sampler = DistributedSampler(val_dataset)
+     val_dataloader = DataLoader(val_dataset, batch_size=args.eval_batch_size, sampler=val_sampler, pin_memory=True)
+
+
+     # Optimizer
+     modules_to_train = [module for module in model.module.get_decoder()]
+     if not args.freeze_encoder:
+         modules_to_train += [module for module in model.module.get_encoder()]
+     else:
+         for module in model.module.get_encoder():
+             module.eval()
+             module.requires_grad_(False)
+         logger.warning("Encoder is frozen!")
+
+     parameters_to_train = []
+     for module in modules_to_train:
+         parameters_to_train += module.parameters()
+
+     gen_optimizer = torch.optim.Adam(parameters_to_train, lr=args.lr)
+     disc_optimizer = torch.optim.Adam(
+         disc.module.discriminator.parameters(), lr=args.lr
+     )
+
+     # AMP scaler
+     scaler = torch.cuda.amp.GradScaler()
+     precision = torch.bfloat16
+     if args.mix_precision == "fp16":
+         precision = torch.float16
+     elif args.mix_precision == "fp32":
+         precision = torch.float32
+
+
+
+     if args.resume_from_checkpoint:
+         if not os.path.isfile(args.resume_from_checkpoint):
+             raise Exception(
+                 f"Make sure `{args.resume_from_checkpoint}` is a ckpt file."
+             )
+         checkpoint = torch.load(args.resume_from_checkpoint, map_location="cpu")
+
+         if "ema_state_dict" in checkpoint and len(checkpoint['ema_state_dict']) > 0 and os.environ.get("NOT_USE_EMA_MODEL", "0") == "0":
+             sd = checkpoint["ema_state_dict"]
+             sd = {key.replace("module.", ""): value for key, value in sd.items()}
+             model.module.load_state_dict(sd, strict=True)
+             logger.info("Load from EMA state dict! If you want to load from original state dict, you should set NOT_USE_EMA_MODEL=1.")
+         else:
+             model.module.load_state_dict(checkpoint["state_dict"]["gen_model"])
+         disc.module.load_state_dict(checkpoint["state_dict"]["dics_model"], strict=False)
+         if not args.not_resume_training_process:
+             scaler.load_state_dict(checkpoint["scaler_state"])
+             gen_optimizer.load_state_dict(checkpoint["optimizer_state"]["gen_optimizer"])
+             disc_optimizer.load_state_dict(checkpoint["optimizer_state"]["disc_optimizer"])
+             start_epoch = checkpoint["epoch"]
+             start_batch_idx = checkpoint.get("batch_idx", 0)
+             logger.info(
+                 f"Checkpoint loaded from {args.resume_from_checkpoint}, starting from epoch {start_epoch} batch {start_batch_idx}"
+             )
+         else:
+             logger.warning(
+                 f"Checkpoint loaded from {args.resume_from_checkpoint}, but the optimizer and scaler states are not resumed."
+             )
+
+     if args.ema:
+         logger.warning(f"Start with EMA. EMA decay = {args.ema_decay}.")
+         ema = EMA(model, args.ema_decay)
+         ema.register()
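The EMA object above is imported from causalvideovae.model; judging only from the calls used in this file (register, update, apply_shadow, restore, and the saved ema.shadow dict), it keeps an exponential moving average of the trainable parameters and can temporarily swap it in for validation. A minimal sketch of such a helper, written as an assumption about its behaviour rather than the actual implementation:

class SimpleEMA:
    """Hypothetical EMA helper mirroring the register/update/apply_shadow/restore interface."""

    def __init__(self, model, decay=0.999):
        self.model, self.decay = model, decay
        self.shadow, self.backup = {}, {}

    def register(self):
        # Snapshot the current trainable parameters as the initial average.
        for name, p in self.model.named_parameters():
            if p.requires_grad:
                self.shadow[name] = p.data.clone()

    def update(self):
        # shadow = decay * shadow + (1 - decay) * param, called after each generator step.
        for name, p in self.model.named_parameters():
            if name in self.shadow:
                self.shadow[name].mul_(self.decay).add_(p.data, alpha=1 - self.decay)

    def apply_shadow(self):
        # Temporarily load the averaged weights (used around validation).
        for name, p in self.model.named_parameters():
            if name in self.shadow:
                self.backup[name] = p.data.clone()
                p.data.copy_(self.shadow[name])

    def restore(self):
        # Put the live training weights back.
        for name, p in self.model.named_parameters():
            if name in self.backup:
                p.data.copy_(self.backup[name])
        self.backup = {}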
+
+     # Training loop
+     logger.info("Prepared!")
+     dist.barrier()
+     if rank == 0:
+         logger.info(f"=== Model Params ===")
+         logger.info(f"Generator:\t\t{total_params(model.module)}M")
+         logger.info(f"\t- Encoder:\t{total_params(model.module.encoder):d}M")
+         logger.info(f"\t- Decoder:\t{total_params(model.module.decoder):d}M")
+         logger.info(f"Discriminator:\t{total_params(disc.module):d}M")
+         logger.info(f"===========")
+         logger.info(f"Precision is set to: {args.mix_precision}!")
+         logger.info("Start training!")
+
+     # Training Bar
+     bar_desc = ""
+     bar = None
+     if rank == 0:
+         max_steps = (
+             args.epochs * len(dataloader) if args.max_steps is None else args.max_steps
+         )
+         bar = tqdm.tqdm(total=max_steps, desc=bar_desc.format(current_epoch=0, loss=0))
+         bar_desc = "Epoch: {current_epoch}, Loss: {loss}"
+         logger.warning("Training Details: ")
+         logger.warning(f" Max steps: {max_steps}")
+         logger.warning(f" Dataset Samples: {len(dataloader)}")
+         logger.warning(
+             f" Total Batch Size: {args.batch_size} * {os.environ['WORLD_SIZE']}"
+         )
+     dist.barrier()
+
+     # Training Loop
+     num_epochs = args.epochs
+     current_step = 1
+
+     def update_bar(bar):
+         if rank == 0:
+             bar.desc = bar_desc.format(current_epoch=epoch, loss=f"-")
+             bar.update()
+     for epoch in range(num_epochs):
+         """
+         if epoch < start_epoch:
+             update_bar(bar)
+             current_step += len(dataloader)
+             continue
+         """
+         set_train(modules_to_train)
+         ddp_sampler.set_epoch(epoch)  # Shuffle data at every epoch
+         for batch_idx, batch in enumerate(dataloader):
+             """
+             if epoch == start_epoch and batch_idx < start_batch_idx:
+                 update_bar(bar)
+                 current_step += 1
+                 continue
+             """
+             inputs = batch["video"].to(rank)
+             if (
+                 current_step % 2 == 1
+                 and current_step >= disc.module.discriminator_iter_start
+             ):
+                 set_modules_requires_grad(modules_to_train, False)
+                 step_gen = False
+                 step_dis = True
+             else:
+                 set_modules_requires_grad(modules_to_train, True)
+                 step_gen = True
+                 step_dis = False
+
+             assert (
+                 step_gen or step_dis
+             ), "You should backward either Gen or Dis in a step."
+
+             with torch.cuda.amp.autocast(dtype=precision):
+                 outputs, posterior = model(inputs)
+
+             # Generator Step
+             if step_gen:
+                 with torch.cuda.amp.autocast(dtype=precision):
+                     g_loss, g_log = disc(
+                         inputs,
+                         outputs,
+                         posterior,
+                         optimizer_idx=0,
+                         global_step=current_step,
+                         last_layer=model.module.get_last_layer(),
+                         split="train",
+                     )
+                 gen_optimizer.zero_grad()
+                 scaler.scale(g_loss).backward()
+                 scaler.step(gen_optimizer)
+                 scaler.update()
+                 if args.ema:
+                     ema.update()
+                 if rank == 0 and current_step % args.log_steps == 0:
+                     wandb.log({"train/generator_loss": g_loss.item()}, step=current_step)
+
+             # Discriminator Step
+             if step_dis:
+                 with torch.cuda.amp.autocast(dtype=precision):
+                     d_loss, d_log = disc(
+                         inputs,
+                         outputs,
+                         posterior,
+                         optimizer_idx=1,
+                         global_step=current_step,
+                         last_layer=None,
+                         split="train",
+                     )
+                 disc_optimizer.zero_grad()
+                 scaler.scale(d_loss).backward()
+                 scaler.step(disc_optimizer)
+                 scaler.update()
+                 if rank == 0 and current_step % args.log_steps == 0:
+                     wandb.log({"train/discriminator_loss": d_loss.item()}, step=current_step)
+
+             def valid_model(model, name=""):
+                 set_eval(modules_to_train)
+                 psnr_list, lpips_list, video_log = valid(rank, model, val_dataloader, precision, args)
+                 valid_psnr, valid_lpips, valid_video_log = gather_valid_result(psnr_list, lpips_list, video_log, rank, dist.get_world_size())
+                 if rank == 0:
+                     name = "_" + name if name != "" else name
+                     wandb.log({f"val{name}/recon": wandb.Video(np.array(valid_video_log), fps=10)}, step=current_step)
+                     wandb.log({f"val{name}/psnr": valid_psnr}, step=current_step)
+                     wandb.log({f"val{name}/lpips": valid_lpips}, step=current_step)
+                     logger.info(f"{name} Validation done.")
+
+             if current_step % args.eval_steps == 0 or current_step == 1:
+                 if rank == 0:
+                     logger.info("Starting validation...")
+                 valid_model(model)
+                 if args.ema:
+                     ema.apply_shadow()
+                     valid_model(model, "ema")
+                     ema.restore()
+
+             # Checkpoint
+             if current_step % args.save_ckpt_step == 0 and rank == 0:
+                 file_path = save_checkpoint(
+                     epoch,
+                     batch_idx,
+                     {
+                         "gen_optimizer": gen_optimizer.state_dict(),
+                         "disc_optimizer": disc_optimizer.state_dict(),
+                     },
+                     {
+                         "gen_model": model.module.state_dict(),
+                         "dics_model": disc.module.state_dict(),
+                     },
+                     scaler.state_dict(),
+                     ckpt_dir,
+                     f"checkpoint-{current_step}.ckpt",
+                     ema_state_dict=ema.shadow if args.ema else {}
+                 )
+                 logger.info(f"Checkpoint has been saved to `{file_path}`.")
+
+             # Update step
+             update_bar(bar)
+             current_step += 1
+
+     dist.destroy_process_group()
+
+
+ def main():
+     parser = argparse.ArgumentParser(description="Distributed Training")
+     # Exp setting
+     parser.add_argument(
+         "--exp_name", type=str, default="test", help="experiment name"
+     )
+     parser.add_argument("--seed", type=int, default=1234, help="seed")
+     # Training setting
+     parser.add_argument(
+         "--epochs", type=int, default=10, help="number of epochs to train"
+     )
+     parser.add_argument(
+         "--max_steps", type=int, default=None, help="maximum number of training steps"
+     )
+     parser.add_argument("--save_ckpt_step", type=int, default=1000, help="")
+     parser.add_argument("--ckpt_dir", type=str, default="./results/", help="")
+     parser.add_argument(
+         "--batch_size", type=int, default=1, help="batch size for training"
+     )
+     parser.add_argument("--lr", type=float, default=1e-5, help="learning rate")
+     parser.add_argument("--log_steps", type=int, default=5, help="log steps")
+     parser.add_argument("--freeze_encoder", action="store_true", help="")
+
+     # Data
+     parser.add_argument("--video_path", type=str, default=None, help="")
+     parser.add_argument("--num_frames", type=int, default=17, help="")
+     parser.add_argument("--resolution", type=int, default=256, help="")
+     parser.add_argument("--sample_rate", type=int, default=2, help="")
+     parser.add_argument("--dynamic_sample", type=bool, default=False, help="")
+     # Generator model
+     parser.add_argument("--find_unused_parameters", action="store_true", help="")
+     parser.add_argument(
+         "--pretrained_model_name_or_path", type=str, default=None, help=""
+     )
+     parser.add_argument("--resume_from_checkpoint", type=str, default=None, help="")
+     parser.add_argument("--not_resume_training_process", action="store_true", help="")
+     parser.add_argument("--model_config", type=str, default=None, help="")
+     parser.add_argument(
+         "--mix_precision",
+         type=str,
+         default="bf16",
+         choices=["fp16", "bf16", "fp32"],
+         help="precision for training",
+     )
+
+     # Discriminator Model
+     parser.add_argument("--load_disc_from_checkpoint", type=str, default=None, help="")
+     parser.add_argument(
+         "--disc_cls",
+         type=str,
+         default="causalvideovae.model.losses.LPIPSWithDiscriminator3D",
+         help="",
+     )
+     parser.add_argument("--disc_start", type=int, default=5, help="")
+     parser.add_argument("--disc_weight", type=float, default=0.5, help="")
+     parser.add_argument("--kl_weight", type=float, default=1e-06, help="")
+     parser.add_argument("--perceptual_weight", type=float, default=1.0, help="")
+     parser.add_argument("--loss_type", type=str, default="l1", help="")
+     parser.add_argument("--logvar_init", type=float, default=0.0, help="")
+
+     # Validation
+     parser.add_argument("--eval_steps", type=int, default=1000, help="")
+     parser.add_argument("--eval_video_path", type=str, default=None, help="")
+     parser.add_argument("--eval_num_frames", type=int, default=17, help="")
+     parser.add_argument("--eval_resolution", type=int, default=256, help="")
+     parser.add_argument("--eval_sample_rate", type=int, default=1, help="")
+     parser.add_argument("--eval_batch_size", type=int, default=8, help="")
+     parser.add_argument("--eval_subset_size", type=int, default=100, help="")
+     parser.add_argument("--eval_num_video_log", type=int, default=2, help="")
+     parser.add_argument("--eval_lpips", action="store_true", help="")
+
+     # Dataset
+     parser.add_argument("--dataset_num_worker", type=int, default=16, help="")
+
+     # EMA
+     parser.add_argument("--ema", action="store_true", help="")
+     parser.add_argument("--ema_decay", type=float, default=0.999, help="")
+
+     args = parser.parse_args()
+
+     set_random_seed(args.seed)
+     train(args)
+
+
+ if __name__ == "__main__":
+     main()