Prajwal-r-k committed on
Commit e55d8b7 · verified · 1 Parent(s): 200c833

Upload 126 files

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +9 -0
  2. NAFNet/.gitignore +9 -0
  3. NAFNet/LICENSE +228 -0
  4. NAFNet/VERSION +1 -0
  5. NAFNet/basicsr/data/__init__.py +135 -0
  6. NAFNet/basicsr/data/data_sampler.py +56 -0
  7. NAFNet/basicsr/data/data_util.py +340 -0
  8. NAFNet/basicsr/data/ffhq_dataset.py +71 -0
  9. NAFNet/basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt +0 -0
  10. NAFNet/basicsr/data/meta_info/meta_info_REDS4_test_GT.txt +4 -0
  11. NAFNet/basicsr/data/meta_info/meta_info_REDS_GT.txt +270 -0
  12. NAFNet/basicsr/data/meta_info/meta_info_REDSofficial4_test_GT.txt +4 -0
  13. NAFNet/basicsr/data/meta_info/meta_info_REDSval_official_test_GT.txt +30 -0
  14. NAFNet/basicsr/data/meta_info/meta_info_Vimeo90K_test_GT.txt +0 -0
  15. NAFNet/basicsr/data/meta_info/meta_info_Vimeo90K_test_fast_GT.txt +1225 -0
  16. NAFNet/basicsr/data/meta_info/meta_info_Vimeo90K_test_medium_GT.txt +0 -0
  17. NAFNet/basicsr/data/meta_info/meta_info_Vimeo90K_test_slow_GT.txt +1613 -0
  18. NAFNet/basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt +0 -0
  19. NAFNet/basicsr/data/paired_image_SR_LR_FullImage_Memory_dataset.py +296 -0
  20. NAFNet/basicsr/data/paired_image_SR_LR_dataset.py +301 -0
  21. NAFNet/basicsr/data/paired_image_dataset.py +135 -0
  22. NAFNet/basicsr/data/prefetch_dataloader.py +132 -0
  23. NAFNet/basicsr/data/reds_dataset.py +243 -0
  24. NAFNet/basicsr/data/single_image_dataset.py +73 -0
  25. NAFNet/basicsr/data/transforms.py +247 -0
  26. NAFNet/basicsr/data/video_test_dataset.py +331 -0
  27. NAFNet/basicsr/data/vimeo90k_dataset.py +136 -0
  28. NAFNet/basicsr/demo.py +62 -0
  29. NAFNet/basicsr/demo_ssr.py +119 -0
  30. NAFNet/basicsr/metrics/__init__.py +10 -0
  31. NAFNet/basicsr/metrics/fid.py +108 -0
  32. NAFNet/basicsr/metrics/metric_util.py +53 -0
  33. NAFNet/basicsr/metrics/niqe.py +211 -0
  34. NAFNet/basicsr/metrics/niqe_pris_params.npz +3 -0
  35. NAFNet/basicsr/metrics/psnr_ssim.py +358 -0
  36. NAFNet/basicsr/models/__init__.py +48 -0
  37. NAFNet/basicsr/models/archs/Baseline_arch.py +202 -0
  38. NAFNet/basicsr/models/archs/NAFNet_arch.py +202 -0
  39. NAFNet/basicsr/models/archs/NAFSSR_arch.py +170 -0
  40. NAFNet/basicsr/models/archs/__init__.py +52 -0
  41. NAFNet/basicsr/models/archs/arch_util.py +350 -0
  42. NAFNet/basicsr/models/archs/local_arch.py +104 -0
  43. NAFNet/basicsr/models/base_model.py +356 -0
  44. NAFNet/basicsr/models/image_restoration_model.py +413 -0
  45. NAFNet/basicsr/models/losses/__init__.py +11 -0
  46. NAFNet/basicsr/models/losses/loss_util.py +101 -0
  47. NAFNet/basicsr/models/losses/losses.py +116 -0
  48. NAFNet/basicsr/models/lr_scheduler.py +189 -0
  49. NAFNet/basicsr/test.py +70 -0
  50. NAFNet/basicsr/train.py +305 -0
.gitattributes CHANGED
@@ -53,3 +53,12 @@ NAFNetModel/NAFNet/libuv/docs/src/static/architecture.png filter=lfs diff=lfs me
  NAFNetModel/NAFNet/libuv/docs/src/static/diagrams.key/preview.jpg filter=lfs diff=lfs merge=lfs -text
  NAFNetModel/outputs/output.png filter=lfs diff=lfs merge=lfs -text
  NAFNetModel/uploads/101.png filter=lfs diff=lfs merge=lfs -text
+ NAFNet/demo/noisy.png filter=lfs diff=lfs merge=lfs -text
+ NAFNet/demo/sr_img_l.png filter=lfs diff=lfs merge=lfs -text
+ NAFNet/demo/sr_img_r.png filter=lfs diff=lfs merge=lfs -text
+ NAFNet/figures/deblur.gif filter=lfs diff=lfs merge=lfs -text
+ NAFNet/figures/denoise.gif filter=lfs diff=lfs merge=lfs -text
+ NAFNet/figures/NAFSSR_arch.jpg filter=lfs diff=lfs merge=lfs -text
+ NAFNet/figures/NAFSSR_params.jpg filter=lfs diff=lfs merge=lfs -text
+ NAFNet/figures/PSNR_vs_MACs.jpg filter=lfs diff=lfs merge=lfs -text
+ NAFNet/figures/StereoSR.gif filter=lfs diff=lfs merge=lfs -text
NAFNet/.gitignore ADDED
@@ -0,0 +1,9 @@
+ .DS_Store
+ .idea/*
+ experiments
+ logs/
+ *results*
+ *__pycache__*
+ *.sh
+ datasets
+ basicsr.egg-info
NAFNet/LICENSE ADDED
@@ -0,0 +1,228 @@
+ MIT License
+
+ Copyright (c) 2022 megvii-model
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+
+
+ BasicSR
+ Copyright 2018-2020 BasicSR Authors
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2018-2020 BasicSR Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
NAFNet/VERSION ADDED
@@ -0,0 +1 @@
+ 1.2.0
NAFNet/basicsr/data/__init__.py ADDED
@@ -0,0 +1,135 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+
+ import importlib
+ import numpy as np
+ import random
+ import torch
+ import torch.utils.data
+ from functools import partial
+ from os import path as osp
+
+ from basicsr.data.prefetch_dataloader import PrefetchDataLoader
+ from basicsr.utils import get_root_logger, scandir
+ from basicsr.utils.dist_util import get_dist_info
+
+ __all__ = ['create_dataset', 'create_dataloader']
+
+ # automatically scan and import dataset modules
+ # scan all the files under the data folder with '_dataset' in file names
+ data_folder = osp.dirname(osp.abspath(__file__))
+ dataset_filenames = [
+     osp.splitext(osp.basename(v))[0] for v in scandir(data_folder)
+     if v.endswith('_dataset.py')
+ ]
+ # import all the dataset modules
+ _dataset_modules = [
+     importlib.import_module(f'basicsr.data.{file_name}')
+     for file_name in dataset_filenames
+ ]
+
+
+ def create_dataset(dataset_opt):
+     """Create dataset.
+
+     Args:
+         dataset_opt (dict): Configuration for dataset. It contains:
+             name (str): Dataset name.
+             type (str): Dataset type.
+     """
+     dataset_type = dataset_opt['type']
+
+     # dynamic instantiation
+     for module in _dataset_modules:
+         dataset_cls = getattr(module, dataset_type, None)
+         if dataset_cls is not None:
+             break
+     if dataset_cls is None:
+         raise ValueError(f'Dataset {dataset_type} is not found.')
+
+     dataset = dataset_cls(dataset_opt)
+
+     logger = get_root_logger()
+     logger.info(
+         f'Dataset {dataset.__class__.__name__} - {dataset_opt["name"]} '
+         'is created.')
+     return dataset
+
+
+ def create_dataloader(dataset,
+                       dataset_opt,
+                       num_gpu=1,
+                       dist=False,
+                       sampler=None,
+                       seed=None):
+     """Create dataloader.
+
+     Args:
+         dataset (torch.utils.data.Dataset): Dataset.
+         dataset_opt (dict): Dataset options. It contains the following keys:
+             phase (str): 'train' or 'val'.
+             num_worker_per_gpu (int): Number of workers for each GPU.
+             batch_size_per_gpu (int): Training batch size for each GPU.
+         num_gpu (int): Number of GPUs. Used only in the train phase.
+             Default: 1.
+         dist (bool): Whether in distributed training. Used only in the train
+             phase. Default: False.
+         sampler (torch.utils.data.sampler): Data sampler. Default: None.
+         seed (int | None): Seed. Default: None
+     """
+     phase = dataset_opt['phase']
+     rank, _ = get_dist_info()
+     if phase == 'train':
+         if dist:  # distributed training
+             batch_size = dataset_opt['batch_size_per_gpu']
+             num_workers = dataset_opt['num_worker_per_gpu']
+         else:  # non-distributed training
+             multiplier = 1 if num_gpu == 0 else num_gpu
+             batch_size = dataset_opt['batch_size_per_gpu'] * multiplier
+             num_workers = dataset_opt['num_worker_per_gpu'] * multiplier
+         dataloader_args = dict(
+             dataset=dataset,
+             batch_size=batch_size,
+             shuffle=False,
+             num_workers=num_workers,
+             sampler=sampler,
+             drop_last=True,
+             persistent_workers=True
+         )
+         if sampler is None:
+             dataloader_args['shuffle'] = True
+         dataloader_args['worker_init_fn'] = partial(
+             worker_init_fn, num_workers=num_workers, rank=rank,
+             seed=seed) if seed is not None else None
+     elif phase in ['val', 'test']:  # validation
+         dataloader_args = dict(
+             dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
+     else:
+         raise ValueError(f'Wrong dataset phase: {phase}. '
+                          "Supported ones are 'train', 'val' and 'test'.")
+
+     dataloader_args['pin_memory'] = dataset_opt.get('pin_memory', False)
+
+     prefetch_mode = dataset_opt.get('prefetch_mode')
+     if prefetch_mode == 'cpu':  # CPUPrefetcher
+         num_prefetch_queue = dataset_opt.get('num_prefetch_queue', 1)
+         logger = get_root_logger()
+         logger.info(f'Use {prefetch_mode} prefetch dataloader: '
+                     f'num_prefetch_queue = {num_prefetch_queue}')
+         return PrefetchDataLoader(
+             num_prefetch_queue=num_prefetch_queue, **dataloader_args)
+     else:
+         # prefetch_mode=None: Normal dataloader
+         # prefetch_mode='cuda': dataloader for CUDAPrefetcher
+         return torch.utils.data.DataLoader(**dataloader_args)
+
+
+ def worker_init_fn(worker_id, num_workers, rank, seed):
+     # Set the worker seed to num_workers * rank + worker_id + seed
+     worker_seed = num_workers * rank + worker_id + seed
+     np.random.seed(worker_seed)
+     random.seed(worker_seed)
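This module is NAFNet's dataset registry: any class defined in a `*_dataset.py` file under `basicsr/data` can be instantiated purely from the `type` string of a config dict, and `create_dataloader` wires in the per-worker seeding (`num_workers * rank + worker_id + seed`) so each worker gets a distinct but reproducible RNG stream. A minimal usage sketch; `PairedImageDataset` comes from the `paired_image_dataset.py` in this upload, while the option values and paths are illustrative assumptions, not values from this commit:

from basicsr.data import create_dataset, create_dataloader

# Hypothetical training options; keys follow the docstrings above.
dataset_opt = {
    'name': 'demo-train',
    'type': 'PairedImageDataset',  # resolved via getattr over the scanned modules
    'phase': 'train',
    'batch_size_per_gpu': 8,
    'num_worker_per_gpu': 4,
    'pin_memory': True,
    # plus dataset-specific keys such as dataroot_gt / dataroot_lq
}

train_set = create_dataset(dataset_opt)
train_loader = create_dataloader(
    train_set, dataset_opt, num_gpu=1, dist=False, sampler=None, seed=10)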
NAFNet/basicsr/data/data_sampler.py ADDED
@@ -0,0 +1,56 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+
+ import math
+ import torch
+ from torch.utils.data.sampler import Sampler
+
+
+ class EnlargedSampler(Sampler):
+     """Sampler that restricts data loading to a subset of the dataset.
+
+     Modified from torch.utils.data.distributed.DistributedSampler.
+     Supports enlarging the dataset for iteration-based training, which saves
+     time when restarting the dataloader after each epoch.
+
+     Args:
+         dataset (torch.utils.data.Dataset): Dataset used for sampling.
+         num_replicas (int | None): Number of processes participating in
+             the training. It is usually the world_size.
+         rank (int | None): Rank of the current process within num_replicas.
+         ratio (int): Enlarging ratio. Default: 1.
+     """
+
+     def __init__(self, dataset, num_replicas, rank, ratio=1):
+         self.dataset = dataset
+         self.num_replicas = num_replicas
+         self.rank = rank
+         self.epoch = 0
+         self.num_samples = math.ceil(
+             len(self.dataset) * ratio / self.num_replicas)
+         self.total_size = self.num_samples * self.num_replicas
+
+     def __iter__(self):
+         # deterministically shuffle based on epoch
+         g = torch.Generator()
+         g.manual_seed(self.epoch)
+         indices = torch.randperm(self.total_size, generator=g).tolist()
+
+         dataset_size = len(self.dataset)
+         indices = [v % dataset_size for v in indices]
+
+         # subsample
+         indices = indices[self.rank:self.total_size:self.num_replicas]
+         assert len(indices) == self.num_samples
+
+         return iter(indices)
+
+     def __len__(self):
+         return self.num_samples
+
+     def set_epoch(self, epoch):
+         self.epoch = epoch
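A small sketch of the sampler's contract, using a dummy dataset in place of anything produced by `create_dataset`; the `ratio=10` value is an illustrative assumption:

import torch
from basicsr.data.data_sampler import EnlargedSampler

dataset = torch.utils.data.TensorDataset(torch.arange(100))
world_size, rank = 1, 0            # normally from get_dist_info()
sampler = EnlargedSampler(dataset, world_size, rank, ratio=10)

print(len(sampler))                # 1000: the 100-sample dataset enlarged 10x
for epoch in range(2):
    sampler.set_epoch(epoch)       # deterministic, epoch-seeded shuffle
    first = next(iter(sampler))    # indices are wrapped modulo len(dataset)
    assert 0 <= first < 100

Passing this sampler to create_dataloader (train phase) disables the loader's own shuffling, so the epoch-seeded permutation here is the only source of ordering.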
NAFNet/basicsr/data/data_util.py ADDED
@@ -0,0 +1,340 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+ import cv2
+ import numpy as np
+ import torch
+ from os import path as osp
+ from torch.nn import functional as F
+
+ from basicsr.data.transforms import mod_crop
+ from basicsr.utils import img2tensor, scandir
+
+
+ def read_img_seq(path, require_mod_crop=False, scale=1):
+     """Read a sequence of images from a given folder path.
+
+     Args:
+         path (list[str] | str): List of image paths or image folder path.
+         require_mod_crop (bool): Require mod crop for each image.
+             Default: False.
+         scale (int): Scale factor for mod_crop. Default: 1.
+
+     Returns:
+         Tensor: size (t, c, h, w), RGB, [0, 1].
+     """
+     if isinstance(path, list):
+         img_paths = path
+     else:
+         img_paths = sorted(list(scandir(path, full_path=True)))
+     imgs = [cv2.imread(v).astype(np.float32) / 255. for v in img_paths]
+     if require_mod_crop:
+         imgs = [mod_crop(img, scale) for img in imgs]
+     imgs = img2tensor(imgs, bgr2rgb=True, float32=True)
+     imgs = torch.stack(imgs, dim=0)
+     return imgs
+
+
+ def generate_frame_indices(crt_idx,
+                            max_frame_num,
+                            num_frames,
+                            padding='reflection'):
+     """Generate an index list for reading `num_frames` frames from a sequence
+     of images.
+
+     Args:
+         crt_idx (int): Current center index.
+         max_frame_num (int): Max number of the sequence of images (from 1).
+         num_frames (int): Reading num_frames frames.
+         padding (str): Padding mode, one of
+             'replicate' | 'reflection' | 'reflection_circle' | 'circle'
+             Examples: current_idx = 0, num_frames = 5
+             The generated frame indices under different padding modes:
+             replicate: [0, 0, 0, 1, 2]
+             reflection: [2, 1, 0, 1, 2]
+             reflection_circle: [4, 3, 0, 1, 2]
+             circle: [3, 4, 0, 1, 2]
+
+     Returns:
+         list[int]: A list of indices.
+     """
+     assert num_frames % 2 == 1, 'num_frames should be an odd number.'
+     assert padding in ('replicate', 'reflection', 'reflection_circle',
+                        'circle'), f'Wrong padding mode: {padding}.'
+
+     max_frame_num = max_frame_num - 1  # start from 0
+     num_pad = num_frames // 2
+
+     indices = []
+     for i in range(crt_idx - num_pad, crt_idx + num_pad + 1):
+         if i < 0:
+             if padding == 'replicate':
+                 pad_idx = 0
+             elif padding == 'reflection':
+                 pad_idx = -i
+             elif padding == 'reflection_circle':
+                 pad_idx = crt_idx + num_pad - i
+             else:
+                 pad_idx = num_frames + i
+         elif i > max_frame_num:
+             if padding == 'replicate':
+                 pad_idx = max_frame_num
+             elif padding == 'reflection':
+                 pad_idx = max_frame_num * 2 - i
+             elif padding == 'reflection_circle':
+                 pad_idx = (crt_idx - num_pad) - (i - max_frame_num)
+             else:
+                 pad_idx = i - num_frames
+         else:
+             pad_idx = i
+         indices.append(pad_idx)
+     return indices
+
+
+ def paired_paths_from_lmdb(folders, keys):
+     """Generate paired paths from lmdb files.
+
+     Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is:
+
+     lq.lmdb
+     ├── data.mdb
+     ├── lock.mdb
+     ├── meta_info.txt
+
+     The data.mdb and lock.mdb are standard lmdb files and you can refer to
+     https://lmdb.readthedocs.io/en/release/ for more details.
+
+     The meta_info.txt is a specified txt file to record the meta information
+     of our datasets. It will be automatically created when preparing
+     datasets by our provided dataset tools.
+     Each line in the txt file records
+     1) image name (with extension),
+     2) image shape,
+     3) compression level, separated by a white space.
+     Example: `baboon.png (120,125,3) 1`
+
+     We use the image name without extension as the lmdb key.
+     Note that we use the same key for the corresponding lq and gt images.
+
+     Args:
+         folders (list[str]): A list of folder paths. The order of the list
+             should be [input_folder, gt_folder].
+         keys (list[str]): A list of keys identifying folders. The order should
+             be consistent with folders, e.g., ['lq', 'gt'].
+             Note that this key is different from lmdb keys.
+
+     Returns:
+         list[str]: Returned path list.
+     """
+     assert len(folders) == 2, (
+         'The len of folders should be 2 with [input_folder, gt_folder]. '
+         f'But got {len(folders)}')
+     assert len(keys) == 2, (
+         'The len of keys should be 2 with [input_key, gt_key]. '
+         f'But got {len(keys)}')
+     input_folder, gt_folder = folders
+     input_key, gt_key = keys
+
+     if not (input_folder.endswith('.lmdb') and gt_folder.endswith('.lmdb')):
+         raise ValueError(
+             f'{input_key} folder and {gt_key} folder should both be in lmdb '
+             f'format. But received {input_key}: {input_folder}; '
+             f'{gt_key}: {gt_folder}')
+     # ensure that the two meta_info files are the same
+     with open(osp.join(input_folder, 'meta_info.txt')) as fin:
+         input_lmdb_keys = [line.split('.')[0] for line in fin]
+     with open(osp.join(gt_folder, 'meta_info.txt')) as fin:
+         gt_lmdb_keys = [line.split('.')[0] for line in fin]
+     if set(input_lmdb_keys) != set(gt_lmdb_keys):
+         raise ValueError(
+             f'Keys in {input_key}_folder and {gt_key}_folder are different.')
+     else:
+         paths = []
+         for lmdb_key in sorted(input_lmdb_keys):
+             paths.append(
+                 dict([(f'{input_key}_path', lmdb_key),
+                       (f'{gt_key}_path', lmdb_key)]))
+         return paths
+
+
+ def paired_paths_from_meta_info_file(folders, keys, meta_info_file,
+                                      filename_tmpl):
+     """Generate paired paths from a meta information file.
+
+     Each line in the meta information file contains the image names and
+     image shape (usually for gt), separated by a white space.
+
+     Example of a meta information file:
+     ```
+     0001_s001.png (480,480,3)
+     0001_s002.png (480,480,3)
+     ```
+
+     Args:
+         folders (list[str]): A list of folder paths. The order of the list
+             should be [input_folder, gt_folder].
+         keys (list[str]): A list of keys identifying folders. The order should
+             be consistent with folders, e.g., ['lq', 'gt'].
+         meta_info_file (str): Path to the meta information file.
+         filename_tmpl (str): Template for each filename. Note that the
+             template excludes the file extension. Usually the filename_tmpl is
+             for files in the input folder.
+
+     Returns:
+         list[str]: Returned path list.
+     """
+     assert len(folders) == 2, (
+         'The len of folders should be 2 with [input_folder, gt_folder]. '
+         f'But got {len(folders)}')
+     assert len(keys) == 2, (
+         'The len of keys should be 2 with [input_key, gt_key]. '
+         f'But got {len(keys)}')
+     input_folder, gt_folder = folders
+     input_key, gt_key = keys
+
+     with open(meta_info_file, 'r') as fin:
+         gt_names = [line.split(' ')[0] for line in fin]
+
+     paths = []
+     for gt_name in gt_names:
+         basename, ext = osp.splitext(osp.basename(gt_name))
+         input_name = f'{filename_tmpl.format(basename)}{ext}'
+         input_path = osp.join(input_folder, input_name)
+         gt_path = osp.join(gt_folder, gt_name)
+         paths.append(
+             dict([(f'{input_key}_path', input_path),
+                   (f'{gt_key}_path', gt_path)]))
+     return paths
+
+
+ def paired_paths_from_folder(folders, keys, filename_tmpl):
+     """Generate paired paths from folders.
+
+     Args:
+         folders (list[str]): A list of folder paths. The order of the list
+             should be [input_folder, gt_folder].
+         keys (list[str]): A list of keys identifying folders. The order should
+             be consistent with folders, e.g., ['lq', 'gt'].
+         filename_tmpl (str): Template for each filename. Note that the
+             template excludes the file extension. Usually the filename_tmpl is
+             for files in the input folder.
+
+     Returns:
+         list[str]: Returned path list.
+     """
+     assert len(folders) == 2, (
+         'The len of folders should be 2 with [input_folder, gt_folder]. '
+         f'But got {len(folders)}')
+     assert len(keys) == 2, (
+         'The len of keys should be 2 with [input_key, gt_key]. '
+         f'But got {len(keys)}')
+     input_folder, gt_folder = folders
+     input_key, gt_key = keys
+
+     input_paths = list(scandir(input_folder))
+     gt_paths = list(scandir(gt_folder))
+     assert len(input_paths) == len(gt_paths), (
+         f'{input_key} and {gt_key} datasets have different number of images: '
+         f'{len(input_paths)}, {len(gt_paths)}.')
+     paths = []
+     for idx in range(len(gt_paths)):
+         gt_path = gt_paths[idx]
+         basename, ext = osp.splitext(osp.basename(gt_path))
+         input_path = input_paths[idx]
+         basename_input, ext_input = osp.splitext(osp.basename(input_path))
+         input_name = f'{filename_tmpl.format(basename)}{ext_input}'
+         input_path = osp.join(input_folder, input_name)
+         assert input_name in input_paths, (f'{input_name} is not in '
+                                            f'{input_key}_paths.')
+         gt_path = osp.join(gt_folder, gt_path)
+         paths.append(
+             dict([(f'{input_key}_path', input_path),
+                   (f'{gt_key}_path', gt_path)]))
+     return paths
+
+
+ def paths_from_folder(folder):
+     """Generate paths from folder.
+
+     Args:
+         folder (str): Folder path.
+
+     Returns:
+         list[str]: Returned path list.
+     """
+
+     paths = list(scandir(folder))
+     paths = [osp.join(folder, path) for path in paths]
+     return paths
+
+
+ def paths_from_lmdb(folder):
+     """Generate paths from lmdb.
+
+     Args:
+         folder (str): Folder path.
+
+     Returns:
+         list[str]: Returned path list.
+     """
+     if not folder.endswith('.lmdb'):
+         raise ValueError(f'Folder {folder} should be in lmdb format.')
+     with open(osp.join(folder, 'meta_info.txt')) as fin:
+         paths = [line.split('.')[0] for line in fin]
+     return paths
+
+
+ def generate_gaussian_kernel(kernel_size=13, sigma=1.6):
+     """Generate Gaussian kernel used in `duf_downsample`.
+
+     Args:
+         kernel_size (int): Kernel size. Default: 13.
+         sigma (float): Sigma of the Gaussian kernel. Default: 1.6.
+
+     Returns:
+         np.array: The Gaussian kernel.
+     """
+     from scipy.ndimage import filters as filters
+     kernel = np.zeros((kernel_size, kernel_size))
+     # set element at the middle to one, a dirac delta
+     kernel[kernel_size // 2, kernel_size // 2] = 1
+     # gaussian-smooth the dirac, resulting in a gaussian filter
+     return filters.gaussian_filter(kernel, sigma)
+
+
+ def duf_downsample(x, kernel_size=13, scale=4):
+     """Downsampling with the Gaussian kernel used in the DUF official code.
+
+     Args:
+         x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w).
+         kernel_size (int): Kernel size. Default: 13.
+         scale (int): Downsampling factor. Supported scale: (2, 3, 4).
+             Default: 4.
+
+     Returns:
+         Tensor: DUF downsampled frames.
+     """
+     assert scale in (2, 3,
+                      4), f'Only support scale (2, 3, 4), but got {scale}.'
+
+     squeeze_flag = False
+     if x.ndim == 4:
+         squeeze_flag = True
+         x = x.unsqueeze(0)
+     b, t, c, h, w = x.size()
+     x = x.view(-1, 1, h, w)
+     pad_w, pad_h = kernel_size // 2 + scale * 2, kernel_size // 2 + scale * 2
+     x = F.pad(x, (pad_w, pad_w, pad_h, pad_h), 'reflect')
+
+     gaussian_filter = generate_gaussian_kernel(kernel_size, 0.4 * scale)
+     gaussian_filter = torch.from_numpy(gaussian_filter).type_as(x).unsqueeze(
+         0).unsqueeze(0)
+     x = F.conv2d(x, gaussian_filter, stride=scale)
+     x = x[:, :, 2:-2, 2:-2]
+     x = x.view(b, t, c, x.size(2), x.size(3))
+     if squeeze_flag:
+         x = x.squeeze(0)
+     return x
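Of the helpers above, generate_frame_indices is the one whose boundary behavior is easiest to misread; a quick check that reproduces its docstring example (center index 0, five frames) under each padding mode:

from basicsr.data.data_util import generate_frame_indices

for mode in ('replicate', 'reflection', 'reflection_circle', 'circle'):
    print(mode, generate_frame_indices(0, max_frame_num=100, num_frames=5,
                                       padding=mode))
# replicate         [0, 0, 0, 1, 2]
# reflection        [2, 1, 0, 1, 2]
# reflection_circle [4, 3, 0, 1, 2]
# circle            [3, 4, 0, 1, 2]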
NAFNet/basicsr/data/ffhq_dataset.py ADDED
@@ -0,0 +1,71 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+ from os import path as osp
+ from torch.utils import data as data
+ from torchvision.transforms.functional import normalize
+
+ from basicsr.data.transforms import augment
+ from basicsr.utils import FileClient, imfrombytes, img2tensor
+
+
+ class FFHQDataset(data.Dataset):
+     """FFHQ dataset for StyleGAN.
+
+     Args:
+         opt (dict): Config for train datasets. It contains the following keys:
+             dataroot_gt (str): Data root path for gt.
+             io_backend (dict): IO backend type and other kwargs.
+             mean (list | tuple): Image mean.
+             std (list | tuple): Image std.
+             use_hflip (bool): Whether to horizontally flip.
+     """
+
+     def __init__(self, opt):
+         super(FFHQDataset, self).__init__()
+         self.opt = opt
+         # file client (io backend)
+         self.file_client = None
+         self.io_backend_opt = opt['io_backend']
+
+         self.gt_folder = opt['dataroot_gt']
+         self.mean = opt['mean']
+         self.std = opt['std']
+
+         if self.io_backend_opt['type'] == 'lmdb':
+             self.io_backend_opt['db_paths'] = self.gt_folder
+             if not self.gt_folder.endswith('.lmdb'):
+                 raise ValueError("'dataroot_gt' should end with '.lmdb', "
+                                  f'but received {self.gt_folder}')
+             with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
+                 self.paths = [line.split('.')[0] for line in fin]
+         else:
+             # FFHQ has 70000 images in total
+             self.paths = [
+                 osp.join(self.gt_folder, f'{v:08d}.png') for v in range(70000)
+             ]
+
+     def __getitem__(self, index):
+         if self.file_client is None:
+             self.file_client = FileClient(
+                 self.io_backend_opt.pop('type'), **self.io_backend_opt)
+
+         # load gt image
+         gt_path = self.paths[index]
+         img_bytes = self.file_client.get(gt_path)
+         img_gt = imfrombytes(img_bytes, float32=True)
+
+         # random horizontal flip
+         img_gt = augment(img_gt, hflip=self.opt['use_hflip'], rotation=False)
+         # BGR to RGB, HWC to CHW, numpy to tensor
+         img_gt = img2tensor(img_gt, bgr2rgb=True, float32=True)
+         # normalize
+         normalize(img_gt, self.mean, self.std, inplace=True)
+         return {'gt': img_gt, 'gt_path': gt_path}
+
+     def __len__(self):
+         return len(self.paths)
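A minimal option dict for the dataset above, assuming the plain-disk IO backend; the dataroot path is a placeholder, and the 0.5 mean/std values are a common StyleGAN-style normalization rather than values fixed by this commit:

from basicsr.data.ffhq_dataset import FFHQDataset

opt = {
    'dataroot_gt': '/data/FFHQ',       # expects 00000000.png ... 00069999.png
    'io_backend': {'type': 'disk'},
    'mean': [0.5, 0.5, 0.5],
    'std': [0.5, 0.5, 0.5],
    'use_hflip': True,
}
ffhq = FFHQDataset(opt)
sample = ffhq[0]                       # {'gt': CHW float tensor, 'gt_path': str}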
NAFNet/basicsr/data/meta_info/meta_info_DIV2K800sub_GT.txt ADDED
The diff for this file is too large to render. See raw diff
 
NAFNet/basicsr/data/meta_info/meta_info_REDS4_test_GT.txt ADDED
@@ -0,0 +1,4 @@
+ 000 100 (720,1280,3)
+ 011 100 (720,1280,3)
+ 015 100 (720,1280,3)
+ 020 100 (720,1280,3)
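Each line in these meta_info files is `clip_name frame_count (h,w,c)`; a sketch of how such a line decomposes (the parsing here is illustrative, not the datasets' exact code):

line = '000 100 (720,1280,3)'
clip, num_frames, shape = line.split(' ')
h, w, c = map(int, shape.strip('()').split(','))
print(clip, int(num_frames), (h, w, c))   # 000 100 (720, 1280, 3)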
NAFNet/basicsr/data/meta_info/meta_info_REDS_GT.txt ADDED
@@ -0,0 +1,270 @@
+ 000 100 (720,1280,3)
+ 001 100 (720,1280,3)
+ 002 100 (720,1280,3)
+ 003 100 (720,1280,3)
+ 004 100 (720,1280,3)
+ 005 100 (720,1280,3)
+ 006 100 (720,1280,3)
+ 007 100 (720,1280,3)
+ 008 100 (720,1280,3)
+ 009 100 (720,1280,3)
+ 010 100 (720,1280,3)
+ 011 100 (720,1280,3)
+ 012 100 (720,1280,3)
+ 013 100 (720,1280,3)
+ 014 100 (720,1280,3)
+ 015 100 (720,1280,3)
+ 016 100 (720,1280,3)
+ 017 100 (720,1280,3)
+ 018 100 (720,1280,3)
+ 019 100 (720,1280,3)
+ 020 100 (720,1280,3)
+ 021 100 (720,1280,3)
+ 022 100 (720,1280,3)
+ 023 100 (720,1280,3)
+ 024 100 (720,1280,3)
+ 025 100 (720,1280,3)
+ 026 100 (720,1280,3)
+ 027 100 (720,1280,3)
+ 028 100 (720,1280,3)
+ 029 100 (720,1280,3)
+ 030 100 (720,1280,3)
+ 031 100 (720,1280,3)
+ 032 100 (720,1280,3)
+ 033 100 (720,1280,3)
+ 034 100 (720,1280,3)
+ 035 100 (720,1280,3)
+ 036 100 (720,1280,3)
+ 037 100 (720,1280,3)
+ 038 100 (720,1280,3)
+ 039 100 (720,1280,3)
+ 040 100 (720,1280,3)
+ 041 100 (720,1280,3)
+ 042 100 (720,1280,3)
+ 043 100 (720,1280,3)
+ 044 100 (720,1280,3)
+ 045 100 (720,1280,3)
+ 046 100 (720,1280,3)
+ 047 100 (720,1280,3)
+ 048 100 (720,1280,3)
+ 049 100 (720,1280,3)
+ 050 100 (720,1280,3)
+ 051 100 (720,1280,3)
+ 052 100 (720,1280,3)
+ 053 100 (720,1280,3)
+ 054 100 (720,1280,3)
+ 055 100 (720,1280,3)
+ 056 100 (720,1280,3)
+ 057 100 (720,1280,3)
+ 058 100 (720,1280,3)
+ 059 100 (720,1280,3)
+ 060 100 (720,1280,3)
+ 061 100 (720,1280,3)
+ 062 100 (720,1280,3)
+ 063 100 (720,1280,3)
+ 064 100 (720,1280,3)
+ 065 100 (720,1280,3)
+ 066 100 (720,1280,3)
+ 067 100 (720,1280,3)
+ 068 100 (720,1280,3)
+ 069 100 (720,1280,3)
+ 070 100 (720,1280,3)
+ 071 100 (720,1280,3)
+ 072 100 (720,1280,3)
+ 073 100 (720,1280,3)
+ 074 100 (720,1280,3)
+ 075 100 (720,1280,3)
+ 076 100 (720,1280,3)
+ 077 100 (720,1280,3)
+ 078 100 (720,1280,3)
+ 079 100 (720,1280,3)
+ 080 100 (720,1280,3)
+ 081 100 (720,1280,3)
+ 082 100 (720,1280,3)
+ 083 100 (720,1280,3)
+ 084 100 (720,1280,3)
+ 085 100 (720,1280,3)
+ 086 100 (720,1280,3)
+ 087 100 (720,1280,3)
+ 088 100 (720,1280,3)
+ 089 100 (720,1280,3)
+ 090 100 (720,1280,3)
+ 091 100 (720,1280,3)
+ 092 100 (720,1280,3)
+ 093 100 (720,1280,3)
+ 094 100 (720,1280,3)
+ 095 100 (720,1280,3)
+ 096 100 (720,1280,3)
+ 097 100 (720,1280,3)
+ 098 100 (720,1280,3)
+ 099 100 (720,1280,3)
+ 100 100 (720,1280,3)
+ 101 100 (720,1280,3)
+ 102 100 (720,1280,3)
+ 103 100 (720,1280,3)
+ 104 100 (720,1280,3)
+ 105 100 (720,1280,3)
+ 106 100 (720,1280,3)
+ 107 100 (720,1280,3)
+ 108 100 (720,1280,3)
+ 109 100 (720,1280,3)
+ 110 100 (720,1280,3)
+ 111 100 (720,1280,3)
+ 112 100 (720,1280,3)
+ 113 100 (720,1280,3)
+ 114 100 (720,1280,3)
+ 115 100 (720,1280,3)
+ 116 100 (720,1280,3)
+ 117 100 (720,1280,3)
+ 118 100 (720,1280,3)
+ 119 100 (720,1280,3)
+ 120 100 (720,1280,3)
+ 121 100 (720,1280,3)
+ 122 100 (720,1280,3)
+ 123 100 (720,1280,3)
+ 124 100 (720,1280,3)
+ 125 100 (720,1280,3)
+ 126 100 (720,1280,3)
+ 127 100 (720,1280,3)
+ 128 100 (720,1280,3)
+ 129 100 (720,1280,3)
+ 130 100 (720,1280,3)
+ 131 100 (720,1280,3)
+ 132 100 (720,1280,3)
+ 133 100 (720,1280,3)
+ 134 100 (720,1280,3)
+ 135 100 (720,1280,3)
+ 136 100 (720,1280,3)
+ 137 100 (720,1280,3)
+ 138 100 (720,1280,3)
+ 139 100 (720,1280,3)
+ 140 100 (720,1280,3)
+ 141 100 (720,1280,3)
+ 142 100 (720,1280,3)
+ 143 100 (720,1280,3)
+ 144 100 (720,1280,3)
+ 145 100 (720,1280,3)
+ 146 100 (720,1280,3)
+ 147 100 (720,1280,3)
+ 148 100 (720,1280,3)
+ 149 100 (720,1280,3)
+ 150 100 (720,1280,3)
+ 151 100 (720,1280,3)
+ 152 100 (720,1280,3)
+ 153 100 (720,1280,3)
+ 154 100 (720,1280,3)
+ 155 100 (720,1280,3)
+ 156 100 (720,1280,3)
+ 157 100 (720,1280,3)
+ 158 100 (720,1280,3)
+ 159 100 (720,1280,3)
+ 160 100 (720,1280,3)
+ 161 100 (720,1280,3)
+ 162 100 (720,1280,3)
+ 163 100 (720,1280,3)
+ 164 100 (720,1280,3)
+ 165 100 (720,1280,3)
+ 166 100 (720,1280,3)
+ 167 100 (720,1280,3)
+ 168 100 (720,1280,3)
+ 169 100 (720,1280,3)
+ 170 100 (720,1280,3)
+ 171 100 (720,1280,3)
+ 172 100 (720,1280,3)
+ 173 100 (720,1280,3)
+ 174 100 (720,1280,3)
+ 175 100 (720,1280,3)
+ 176 100 (720,1280,3)
+ 177 100 (720,1280,3)
+ 178 100 (720,1280,3)
+ 179 100 (720,1280,3)
+ 180 100 (720,1280,3)
+ 181 100 (720,1280,3)
+ 182 100 (720,1280,3)
+ 183 100 (720,1280,3)
+ 184 100 (720,1280,3)
+ 185 100 (720,1280,3)
+ 186 100 (720,1280,3)
+ 187 100 (720,1280,3)
+ 188 100 (720,1280,3)
+ 189 100 (720,1280,3)
+ 190 100 (720,1280,3)
+ 191 100 (720,1280,3)
+ 192 100 (720,1280,3)
+ 193 100 (720,1280,3)
+ 194 100 (720,1280,3)
+ 195 100 (720,1280,3)
+ 196 100 (720,1280,3)
+ 197 100 (720,1280,3)
+ 198 100 (720,1280,3)
+ 199 100 (720,1280,3)
+ 200 100 (720,1280,3)
+ 201 100 (720,1280,3)
+ 202 100 (720,1280,3)
+ 203 100 (720,1280,3)
+ 204 100 (720,1280,3)
+ 205 100 (720,1280,3)
+ 206 100 (720,1280,3)
+ 207 100 (720,1280,3)
+ 208 100 (720,1280,3)
+ 209 100 (720,1280,3)
+ 210 100 (720,1280,3)
+ 211 100 (720,1280,3)
+ 212 100 (720,1280,3)
+ 213 100 (720,1280,3)
+ 214 100 (720,1280,3)
+ 215 100 (720,1280,3)
+ 216 100 (720,1280,3)
+ 217 100 (720,1280,3)
+ 218 100 (720,1280,3)
+ 219 100 (720,1280,3)
+ 220 100 (720,1280,3)
+ 221 100 (720,1280,3)
+ 222 100 (720,1280,3)
+ 223 100 (720,1280,3)
+ 224 100 (720,1280,3)
+ 225 100 (720,1280,3)
+ 226 100 (720,1280,3)
+ 227 100 (720,1280,3)
+ 228 100 (720,1280,3)
+ 229 100 (720,1280,3)
+ 230 100 (720,1280,3)
+ 231 100 (720,1280,3)
+ 232 100 (720,1280,3)
+ 233 100 (720,1280,3)
+ 234 100 (720,1280,3)
+ 235 100 (720,1280,3)
+ 236 100 (720,1280,3)
+ 237 100 (720,1280,3)
+ 238 100 (720,1280,3)
+ 239 100 (720,1280,3)
+ 240 100 (720,1280,3)
+ 241 100 (720,1280,3)
+ 242 100 (720,1280,3)
+ 243 100 (720,1280,3)
+ 244 100 (720,1280,3)
+ 245 100 (720,1280,3)
+ 246 100 (720,1280,3)
+ 247 100 (720,1280,3)
+ 248 100 (720,1280,3)
+ 249 100 (720,1280,3)
+ 250 100 (720,1280,3)
+ 251 100 (720,1280,3)
+ 252 100 (720,1280,3)
+ 253 100 (720,1280,3)
+ 254 100 (720,1280,3)
+ 255 100 (720,1280,3)
+ 256 100 (720,1280,3)
+ 257 100 (720,1280,3)
+ 258 100 (720,1280,3)
+ 259 100 (720,1280,3)
+ 260 100 (720,1280,3)
+ 261 100 (720,1280,3)
+ 262 100 (720,1280,3)
+ 263 100 (720,1280,3)
+ 264 100 (720,1280,3)
+ 265 100 (720,1280,3)
+ 266 100 (720,1280,3)
+ 267 100 (720,1280,3)
+ 268 100 (720,1280,3)
+ 269 100 (720,1280,3)
NAFNet/basicsr/data/meta_info/meta_info_REDSofficial4_test_GT.txt ADDED
@@ -0,0 +1,4 @@
+ 240 100 (720,1280,3)
+ 241 100 (720,1280,3)
+ 246 100 (720,1280,3)
+ 257 100 (720,1280,3)
NAFNet/basicsr/data/meta_info/meta_info_REDSval_official_test_GT.txt ADDED
@@ -0,0 +1,30 @@
+ 240 100 (720,1280,3)
+ 241 100 (720,1280,3)
+ 242 100 (720,1280,3)
+ 243 100 (720,1280,3)
+ 244 100 (720,1280,3)
+ 245 100 (720,1280,3)
+ 246 100 (720,1280,3)
+ 247 100 (720,1280,3)
+ 248 100 (720,1280,3)
+ 249 100 (720,1280,3)
+ 250 100 (720,1280,3)
+ 251 100 (720,1280,3)
+ 252 100 (720,1280,3)
+ 253 100 (720,1280,3)
+ 254 100 (720,1280,3)
+ 255 100 (720,1280,3)
+ 256 100 (720,1280,3)
+ 257 100 (720,1280,3)
+ 258 100 (720,1280,3)
+ 259 100 (720,1280,3)
+ 260 100 (720,1280,3)
+ 261 100 (720,1280,3)
+ 262 100 (720,1280,3)
+ 263 100 (720,1280,3)
+ 264 100 (720,1280,3)
+ 265 100 (720,1280,3)
+ 266 100 (720,1280,3)
+ 267 100 (720,1280,3)
+ 268 100 (720,1280,3)
+ 269 100 (720,1280,3)
NAFNet/basicsr/data/meta_info/meta_info_Vimeo90K_test_GT.txt ADDED
The diff for this file is too large to render. See raw diff
 
NAFNet/basicsr/data/meta_info/meta_info_Vimeo90K_test_fast_GT.txt ADDED
@@ -0,0 +1,1225 @@
+ 00001/0625 7 (256,448,3)
+ 00001/0632 7 (256,448,3)
+ 00001/0807 7 (256,448,3)
+ 00001/0832 7 (256,448,3)
+ 00001/0834 7 (256,448,3)
+ 00001/0836 7 (256,448,3)
+ 00002/0004 7 (256,448,3)
+ 00002/0112 7 (256,448,3)
+ 00002/0116 7 (256,448,3)
+ 00002/0123 7 (256,448,3)
+ 00002/0455 7 (256,448,3)
+ 00002/0602 7 (256,448,3)
+ 00002/0976 7 (256,448,3)
+ 00002/0980 7 (256,448,3)
+ 00002/0983 7 (256,448,3)
+ 00002/1000 7 (256,448,3)
+ 00003/0022 7 (256,448,3)
+ 00003/0031 7 (256,448,3)
+ 00003/0035 7 (256,448,3)
+ 00003/0041 7 (256,448,3)
+ 00003/0073 7 (256,448,3)
+ 00003/0107 7 (256,448,3)
+ 00003/0111 7 (256,448,3)
+ 00003/0114 7 (256,448,3)
+ 00003/0117 7 (256,448,3)
+ 00003/0121 7 (256,448,3)
+ 00003/0499 7 (256,448,3)
+ 00003/0501 7 (256,448,3)
+ 00003/0507 7 (256,448,3)
+ 00003/0510 7 (256,448,3)
+ 00003/0517 7 (256,448,3)
+ 00003/0522 7 (256,448,3)
+ 00003/0531 7 (256,448,3)
+ 00003/0533 7 (256,448,3)
+ 00003/0534 7 (256,448,3)
+ 00003/0682 7 (256,448,3)
+ 00003/0687 7 (256,448,3)
+ 00003/0715 7 (256,448,3)
+ 00003/0742 7 (256,448,3)
+ 00003/0751 7 (256,448,3)
+ 00003/0984 7 (256,448,3)
+ 00004/0042 7 (256,448,3)
+ 00004/0165 7 (256,448,3)
+ 00004/0321 7 (256,448,3)
+ 00004/0569 7 (256,448,3)
+ 00004/0572 7 (256,448,3)
+ 00004/0619 7 (256,448,3)
+ 00004/0776 7 (256,448,3)
+ 00004/0780 7 (256,448,3)
+ 00004/0825 7 (256,448,3)
+ 00004/0832 7 (256,448,3)
+ 00004/0853 7 (256,448,3)
+ 00004/0876 7 (256,448,3)
+ 00004/0888 7 (256,448,3)
+ 00005/0015 7 (256,448,3)
+ 00005/0021 7 (256,448,3)
+ 00005/0022 7 (256,448,3)
+ 00005/0024 7 (256,448,3)
+ 00005/0026 7 (256,448,3)
+ 00005/0394 7 (256,448,3)
+ 00005/0403 7 (256,448,3)
+ 00005/0531 7 (256,448,3)
+ 00005/0546 7 (256,448,3)
+ 00005/0554 7 (256,448,3)
+ 00005/0694 7 (256,448,3)
+ 00005/0700 7 (256,448,3)
+ 00005/0740 7 (256,448,3)
+ 00005/0826 7 (256,448,3)
+ 00005/0832 7 (256,448,3)
+ 00005/0834 7 (256,448,3)
+ 00005/0943 7 (256,448,3)
+ 00006/0184 7 (256,448,3)
+ 00006/0205 7 (256,448,3)
+ 00006/0206 7 (256,448,3)
+ 00006/0211 7 (256,448,3)
+ 00006/0271 7 (256,448,3)
+ 00006/0273 7 (256,448,3)
+ 00006/0277 7 (256,448,3)
+ 00006/0283 7 (256,448,3)
+ 00006/0287 7 (256,448,3)
+ 00006/0298 7 (256,448,3)
+ 00006/0310 7 (256,448,3)
+ 00006/0356 7 (256,448,3)
+ 00006/0357 7 (256,448,3)
+ 00006/0544 7 (256,448,3)
+ 00006/0565 7 (256,448,3)
+ 00006/0569 7 (256,448,3)
+ 00006/0573 7 (256,448,3)
+ 00006/0592 7 (256,448,3)
+ 00006/0613 7 (256,448,3)
+ 00006/0633 7 (256,448,3)
+ 00006/0637 7 (256,448,3)
+ 00006/0646 7 (256,448,3)
+ 00006/0649 7 (256,448,3)
+ 00006/0655 7 (256,448,3)
+ 00006/0658 7 (256,448,3)
+ 00006/0662 7 (256,448,3)
+ 00006/0666 7 (256,448,3)
+ 00006/0673 7 (256,448,3)
+ 00007/0248 7 (256,448,3)
+ 00007/0253 7 (256,448,3)
+ 00007/0430 7 (256,448,3)
+ 00007/0434 7 (256,448,3)
+ 00007/0436 7 (256,448,3)
+ 00007/0452 7 (256,448,3)
+ 00007/0464 7 (256,448,3)
+ 00007/0470 7 (256,448,3)
+ 00007/0472 7 (256,448,3)
+ 00007/0483 7 (256,448,3)
+ 00007/0484 7 (256,448,3)
+ 00007/0493 7 (256,448,3)
+ 00007/0508 7 (256,448,3)
+ 00007/0514 7 (256,448,3)
+ 00007/0697 7 (256,448,3)
+ 00007/0698 7 (256,448,3)
+ 00007/0744 7 (256,448,3)
+ 00007/0775 7 (256,448,3)
+ 00007/0786 7 (256,448,3)
+ 00007/0790 7 (256,448,3)
+ 00007/0800 7 (256,448,3)
+ 00007/0833 7 (256,448,3)
+ 00007/0867 7 (256,448,3)
+ 00007/0879 7 (256,448,3)
+ 00007/0899 7 (256,448,3)
+ 00008/0251 7 (256,448,3)
+ 00008/0322 7 (256,448,3)
+ 00008/0971 7 (256,448,3)
+ 00008/0976 7 (256,448,3)
+ 00009/0016 7 (256,448,3)
+ 00009/0036 7 (256,448,3)
+ 00009/0037 7 (256,448,3)
+ 00009/0609 7 (256,448,3)
+ 00009/0812 7 (256,448,3)
+ 00009/0821 7 (256,448,3)
+ 00009/0947 7 (256,448,3)
+ 00009/0952 7 (256,448,3)
+ 00009/0955 7 (256,448,3)
+ 00009/0970 7 (256,448,3)
+ 00010/0072 7 (256,448,3)
+ 00010/0074 7 (256,448,3)
+ 00010/0079 7 (256,448,3)
+ 00010/0085 7 (256,448,3)
+ 00010/0139 7 (256,448,3)
+ 00010/0140 7 (256,448,3)
+ 00010/0183 7 (256,448,3)
+ 00010/0200 7 (256,448,3)
+ 00010/0223 7 (256,448,3)
+ 00010/0305 7 (256,448,3)
+ 00010/0323 7 (256,448,3)
+ 00010/0338 7 (256,448,3)
+ 00010/0342 7 (256,448,3)
+ 00010/0350 7 (256,448,3)
+ 00010/0356 7 (256,448,3)
+ 00010/0362 7 (256,448,3)
+ 00010/0366 7 (256,448,3)
+ 00010/0375 7 (256,448,3)
+ 00010/0404 7 (256,448,3)
+ 00010/0407 7 (256,448,3)
+ 00010/0414 7 (256,448,3)
+ 00010/0418 7 (256,448,3)
+ 00010/0429 7 (256,448,3)
+ 00010/0557 7 (256,448,3)
+ 00010/0564 7 (256,448,3)
+ 00010/0733 7 (256,448,3)
+ 00010/0935 7 (256,448,3)
+ 00010/0939 7 (256,448,3)
+ 00010/0943 7 (256,448,3)
+ 00011/0242 7 (256,448,3)
+ 00011/0259 7 (256,448,3)
+ 00011/0263 7 (256,448,3)
+ 00011/0266 7 (256,448,3)
+ 00011/0278 7 (256,448,3)
+ 00011/0890 7 (256,448,3)
+ 00011/0894 7 (256,448,3)
+ 00011/0903 7 (256,448,3)
+ 00011/0906 7 (256,448,3)
+ 00011/0913 7 (256,448,3)
+ 00012/0011 7 (256,448,3)
+ 00012/0014 7 (256,448,3)
+ 00012/0126 7 (256,448,3)
+ 00012/0127 7 (256,448,3)
+ 00012/0526 7 (256,448,3)
+ 00012/0551 7 (256,448,3)
+ 00012/0896 7 (256,448,3)
+ 00012/0910 7 (256,448,3)
+ 00012/0915 7 (256,448,3)
+ 00013/0167 7 (256,448,3)
+ 00013/0794 7 (256,448,3)
+ 00013/0807 7 (256,448,3)
+ 00013/0846 7 (256,448,3)
+ 00013/0882 7 (256,448,3)
+ 00013/0889 7 (256,448,3)
+ 00013/0910 7 (256,448,3)
+ 00013/0913 7 (256,448,3)
+ 00013/0924 7 (256,448,3)
+ 00013/0931 7 (256,448,3)
+ 00013/0944 7 (256,448,3)
+ 00013/0955 7 (256,448,3)
+ 00013/0962 7 (256,448,3)
+ 00013/0969 7 (256,448,3)
+ 00014/0012 7 (256,448,3)
+ 00014/0025 7 (256,448,3)
+ 00014/0473 7 (256,448,3)
+ 00014/0499 7 (256,448,3)
+ 00014/0524 7 (256,448,3)
+ 00014/0739 7 (256,448,3)
+ 00014/0753 7 (256,448,3)
+ 00014/0771 7 (256,448,3)
+ 00014/0832 7 (256,448,3)
+ 00014/0836 7 (256,448,3)
+ 00014/0838 7 (256,448,3)
+ 00014/0839 7 (256,448,3)
+ 00014/0843 7 (256,448,3)
+ 00014/0846 7 (256,448,3)
+ 00014/0849 7 (256,448,3)
+ 00014/0859 7 (256,448,3)
+ 00014/0880 7 (256,448,3)
+ 00014/0906 7 (256,448,3)
+ 00015/0030 7 (256,448,3)
+ 00015/0067 7 (256,448,3)
+ 00015/0084 7 (256,448,3)
+ 00015/0190 7 (256,448,3)
+ 00015/0575 7 (256,448,3)
+ 00015/0784 7 (256,448,3)
+ 00015/0855 7 (256,448,3)
+ 00015/0904 7 (256,448,3)
+ 00015/0914 7 (256,448,3)
+ 00015/0936 7 (256,448,3)
+ 00015/0939 7 (256,448,3)
+ 00015/0943 7 (256,448,3)
+ 00015/0957 7 (256,448,3)
+ 00016/0131 7 (256,448,3)
+ 00016/0173 7 (256,448,3)
+ 00016/0320 7 (256,448,3)
+ 00016/0328 7 (256,448,3)
+ 00016/0334 7 (256,448,3)
+ 00016/0338 7 (256,448,3)
+ 00016/0339 7 (256,448,3)
+ 00016/0345 7 (256,448,3)
+ 00016/0365 7 (256,448,3)
+ 00016/0584 7 (256,448,3)
+ 00016/0634 7 (256,448,3)
+ 00017/0342 7 (256,448,3)
+ 00017/0346 7 (256,448,3)
+ 00017/0350 7 (256,448,3)
+ 00017/0766 7 (256,448,3)
+ 00017/0786 7 (256,448,3)
+ 00017/0911 7 (256,448,3)
+ 00017/0914 7 (256,448,3)
+ 00018/0217 7 (256,448,3)
+ 00018/0258 7 (256,448,3)
+ 00018/0307 7 (256,448,3)
+ 00018/0480 7 (256,448,3)
+ 00018/0491 7 (256,448,3)
255
+ 00018/0994 7 (256,448,3)
256
+ 00018/0995 7 (256,448,3)
257
+ 00018/0997 7 (256,448,3)
258
+ 00018/1000 7 (256,448,3)
259
+ 00019/0007 7 (256,448,3)
260
+ 00019/0016 7 (256,448,3)
261
+ 00019/0026 7 (256,448,3)
262
+ 00019/0030 7 (256,448,3)
263
+ 00019/0086 7 (256,448,3)
264
+ 00019/0089 7 (256,448,3)
265
+ 00019/0111 7 (256,448,3)
266
+ 00019/0285 7 (256,448,3)
267
+ 00019/0415 7 (256,448,3)
268
+ 00019/0434 7 (256,448,3)
269
+ 00019/0437 7 (256,448,3)
270
+ 00019/0568 7 (256,448,3)
271
+ 00019/0570 7 (256,448,3)
272
+ 00019/0591 7 (256,448,3)
273
+ 00019/0596 7 (256,448,3)
274
+ 00019/0603 7 (256,448,3)
275
+ 00019/0607 7 (256,448,3)
276
+ 00019/0637 7 (256,448,3)
277
+ 00019/0644 7 (256,448,3)
278
+ 00019/0647 7 (256,448,3)
279
+ 00019/0787 7 (256,448,3)
280
+ 00019/0993 7 (256,448,3)
281
+ 00019/0998 7 (256,448,3)
282
+ 00021/0232 7 (256,448,3)
283
+ 00021/0255 7 (256,448,3)
284
+ 00021/0646 7 (256,448,3)
285
+ 00021/0653 7 (256,448,3)
286
+ 00021/0657 7 (256,448,3)
287
+ 00021/0668 7 (256,448,3)
288
+ 00021/0672 7 (256,448,3)
289
+ 00021/0725 7 (256,448,3)
290
+ 00021/0750 7 (256,448,3)
291
+ 00021/0764 7 (256,448,3)
292
+ 00021/0821 7 (256,448,3)
293
+ 00022/0192 7 (256,448,3)
294
+ 00022/0391 7 (256,448,3)
295
+ 00022/0514 7 (256,448,3)
296
+ 00022/0567 7 (256,448,3)
297
+ 00022/0674 7 (256,448,3)
298
+ 00022/0686 7 (256,448,3)
299
+ 00022/0700 7 (256,448,3)
300
+ 00023/0020 7 (256,448,3)
301
+ 00023/0024 7 (256,448,3)
302
+ 00023/0025 7 (256,448,3)
303
+ 00023/0042 7 (256,448,3)
304
+ 00023/0050 7 (256,448,3)
305
+ 00023/0094 7 (256,448,3)
306
+ 00023/0107 7 (256,448,3)
307
+ 00023/0635 7 (256,448,3)
308
+ 00023/0698 7 (256,448,3)
309
+ 00023/0774 7 (256,448,3)
310
+ 00023/0795 7 (256,448,3)
311
+ 00023/0821 7 (256,448,3)
312
+ 00023/0839 7 (256,448,3)
313
+ 00023/0846 7 (256,448,3)
314
+ 00023/0869 7 (256,448,3)
315
+ 00023/0879 7 (256,448,3)
316
+ 00023/0887 7 (256,448,3)
317
+ 00023/0899 7 (256,448,3)
318
+ 00023/0910 7 (256,448,3)
319
+ 00023/0920 7 (256,448,3)
320
+ 00023/0929 7 (256,448,3)
321
+ 00023/0941 7 (256,448,3)
322
+ 00023/0942 7 (256,448,3)
323
+ 00023/0952 7 (256,448,3)
324
+ 00024/0066 7 (256,448,3)
325
+ 00024/0072 7 (256,448,3)
326
+ 00024/0080 7 (256,448,3)
327
+ 00024/0093 7 (256,448,3)
328
+ 00024/0107 7 (256,448,3)
329
+ 00024/0262 7 (256,448,3)
330
+ 00024/0283 7 (256,448,3)
331
+ 00024/0294 7 (256,448,3)
332
+ 00024/0296 7 (256,448,3)
333
+ 00024/0304 7 (256,448,3)
334
+ 00024/0315 7 (256,448,3)
335
+ 00024/0322 7 (256,448,3)
336
+ 00024/0648 7 (256,448,3)
337
+ 00024/0738 7 (256,448,3)
338
+ 00024/0743 7 (256,448,3)
339
+ 00025/0542 7 (256,448,3)
340
+ 00025/0769 7 (256,448,3)
341
+ 00025/0984 7 (256,448,3)
342
+ 00025/0985 7 (256,448,3)
343
+ 00025/0989 7 (256,448,3)
344
+ 00025/0991 7 (256,448,3)
345
+ 00026/0009 7 (256,448,3)
346
+ 00026/0013 7 (256,448,3)
347
+ 00026/0020 7 (256,448,3)
348
+ 00026/0021 7 (256,448,3)
349
+ 00026/0025 7 (256,448,3)
350
+ 00026/0135 7 (256,448,3)
351
+ 00026/0200 7 (256,448,3)
352
+ 00026/0297 7 (256,448,3)
353
+ 00026/0306 7 (256,448,3)
354
+ 00026/0444 7 (256,448,3)
355
+ 00026/0450 7 (256,448,3)
356
+ 00026/0453 7 (256,448,3)
357
+ 00026/0464 7 (256,448,3)
358
+ 00026/0486 7 (256,448,3)
359
+ 00026/0773 7 (256,448,3)
360
+ 00026/0785 7 (256,448,3)
361
+ 00026/0836 7 (256,448,3)
362
+ 00026/0838 7 (256,448,3)
363
+ 00026/0848 7 (256,448,3)
364
+ 00026/0885 7 (256,448,3)
365
+ 00026/0893 7 (256,448,3)
366
+ 00026/0939 7 (256,448,3)
367
+ 00026/0942 7 (256,448,3)
368
+ 00027/0092 7 (256,448,3)
369
+ 00027/0112 7 (256,448,3)
370
+ 00027/0115 7 (256,448,3)
371
+ 00027/0143 7 (256,448,3)
372
+ 00027/0175 7 (256,448,3)
373
+ 00027/0179 7 (256,448,3)
374
+ 00027/0183 7 (256,448,3)
375
+ 00027/0197 7 (256,448,3)
376
+ 00027/0199 7 (256,448,3)
377
+ 00027/0300 7 (256,448,3)
378
+ 00028/0015 7 (256,448,3)
379
+ 00028/0032 7 (256,448,3)
380
+ 00028/0048 7 (256,448,3)
381
+ 00028/0068 7 (256,448,3)
382
+ 00028/0219 7 (256,448,3)
383
+ 00028/0606 7 (256,448,3)
384
+ 00028/0626 7 (256,448,3)
385
+ 00028/0748 7 (256,448,3)
386
+ 00028/0764 7 (256,448,3)
387
+ 00028/0772 7 (256,448,3)
388
+ 00028/0780 7 (256,448,3)
389
+ 00028/0926 7 (256,448,3)
390
+ 00028/0947 7 (256,448,3)
391
+ 00028/0962 7 (256,448,3)
392
+ 00029/0085 7 (256,448,3)
393
+ 00029/0281 7 (256,448,3)
394
+ 00029/0284 7 (256,448,3)
395
+ 00029/0288 7 (256,448,3)
396
+ 00029/0294 7 (256,448,3)
397
+ 00029/0364 7 (256,448,3)
398
+ 00029/0369 7 (256,448,3)
399
+ 00029/0421 7 (256,448,3)
400
+ 00029/0425 7 (256,448,3)
401
+ 00029/0550 7 (256,448,3)
402
+ 00030/0014 7 (256,448,3)
403
+ 00030/0101 7 (256,448,3)
404
+ 00030/0143 7 (256,448,3)
405
+ 00030/0351 7 (256,448,3)
406
+ 00030/0356 7 (256,448,3)
407
+ 00030/0371 7 (256,448,3)
408
+ 00030/0484 7 (256,448,3)
409
+ 00030/0492 7 (256,448,3)
410
+ 00030/0503 7 (256,448,3)
411
+ 00030/0682 7 (256,448,3)
412
+ 00030/0696 7 (256,448,3)
413
+ 00030/0735 7 (256,448,3)
414
+ 00030/0737 7 (256,448,3)
415
+ 00030/0868 7 (256,448,3)
416
+ 00031/0161 7 (256,448,3)
417
+ 00031/0180 7 (256,448,3)
418
+ 00031/0194 7 (256,448,3)
419
+ 00031/0253 7 (256,448,3)
420
+ 00031/0293 7 (256,448,3)
421
+ 00031/0466 7 (256,448,3)
422
+ 00031/0477 7 (256,448,3)
423
+ 00031/0549 7 (256,448,3)
424
+ 00031/0600 7 (256,448,3)
425
+ 00031/0617 7 (256,448,3)
426
+ 00031/0649 7 (256,448,3)
427
+ 00032/0015 7 (256,448,3)
428
+ 00032/0020 7 (256,448,3)
429
+ 00032/0023 7 (256,448,3)
430
+ 00032/0048 7 (256,448,3)
431
+ 00032/0056 7 (256,448,3)
432
+ 00032/0872 7 (256,448,3)
433
+ 00033/0069 7 (256,448,3)
434
+ 00033/0073 7 (256,448,3)
435
+ 00033/0078 7 (256,448,3)
436
+ 00033/0079 7 (256,448,3)
437
+ 00033/0086 7 (256,448,3)
438
+ 00033/0088 7 (256,448,3)
439
+ 00033/0091 7 (256,448,3)
440
+ 00033/0096 7 (256,448,3)
441
+ 00033/0607 7 (256,448,3)
442
+ 00033/0613 7 (256,448,3)
443
+ 00033/0616 7 (256,448,3)
444
+ 00033/0619 7 (256,448,3)
445
+ 00033/0626 7 (256,448,3)
446
+ 00033/0628 7 (256,448,3)
447
+ 00033/0637 7 (256,448,3)
448
+ 00033/0686 7 (256,448,3)
449
+ 00033/0842 7 (256,448,3)
450
+ 00034/0261 7 (256,448,3)
451
+ 00034/0265 7 (256,448,3)
452
+ 00034/0269 7 (256,448,3)
453
+ 00034/0275 7 (256,448,3)
454
+ 00034/0286 7 (256,448,3)
455
+ 00034/0294 7 (256,448,3)
456
+ 00034/0431 7 (256,448,3)
457
+ 00034/0577 7 (256,448,3)
458
+ 00034/0685 7 (256,448,3)
459
+ 00034/0687 7 (256,448,3)
460
+ 00034/0703 7 (256,448,3)
461
+ 00034/0715 7 (256,448,3)
462
+ 00034/0935 7 (256,448,3)
463
+ 00034/0943 7 (256,448,3)
464
+ 00034/0963 7 (256,448,3)
465
+ 00034/0979 7 (256,448,3)
466
+ 00034/0990 7 (256,448,3)
467
+ 00035/0129 7 (256,448,3)
468
+ 00035/0153 7 (256,448,3)
469
+ 00035/0156 7 (256,448,3)
470
+ 00035/0474 7 (256,448,3)
471
+ 00035/0507 7 (256,448,3)
472
+ 00035/0532 7 (256,448,3)
473
+ 00035/0560 7 (256,448,3)
474
+ 00035/0572 7 (256,448,3)
475
+ 00035/0587 7 (256,448,3)
476
+ 00035/0588 7 (256,448,3)
477
+ 00035/0640 7 (256,448,3)
478
+ 00035/0654 7 (256,448,3)
479
+ 00035/0655 7 (256,448,3)
480
+ 00035/0737 7 (256,448,3)
481
+ 00035/0843 7 (256,448,3)
482
+ 00035/0932 7 (256,448,3)
483
+ 00035/0957 7 (256,448,3)
484
+ 00036/0029 7 (256,448,3)
485
+ 00036/0266 7 (256,448,3)
486
+ 00036/0276 7 (256,448,3)
487
+ 00036/0310 7 (256,448,3)
488
+ 00036/0314 7 (256,448,3)
489
+ 00036/0320 7 (256,448,3)
490
+ 00036/0333 7 (256,448,3)
491
+ 00036/0348 7 (256,448,3)
492
+ 00036/0357 7 (256,448,3)
493
+ 00036/0360 7 (256,448,3)
494
+ 00036/0368 7 (256,448,3)
495
+ 00036/0371 7 (256,448,3)
496
+ 00036/0378 7 (256,448,3)
497
+ 00036/0391 7 (256,448,3)
498
+ 00036/0440 7 (256,448,3)
499
+ 00036/0731 7 (256,448,3)
500
+ 00036/0733 7 (256,448,3)
501
+ 00036/0741 7 (256,448,3)
502
+ 00036/0743 7 (256,448,3)
503
+ 00036/0927 7 (256,448,3)
504
+ 00036/0931 7 (256,448,3)
505
+ 00036/0933 7 (256,448,3)
506
+ 00036/0938 7 (256,448,3)
507
+ 00036/0944 7 (256,448,3)
508
+ 00036/0946 7 (256,448,3)
509
+ 00036/0951 7 (256,448,3)
510
+ 00036/0953 7 (256,448,3)
511
+ 00036/0963 7 (256,448,3)
512
+ 00036/0964 7 (256,448,3)
513
+ 00036/0981 7 (256,448,3)
514
+ 00036/0991 7 (256,448,3)
515
+ 00037/0072 7 (256,448,3)
516
+ 00037/0079 7 (256,448,3)
517
+ 00037/0132 7 (256,448,3)
518
+ 00037/0135 7 (256,448,3)
519
+ 00037/0137 7 (256,448,3)
520
+ 00037/0141 7 (256,448,3)
521
+ 00037/0229 7 (256,448,3)
522
+ 00037/0234 7 (256,448,3)
523
+ 00037/0239 7 (256,448,3)
524
+ 00037/0242 7 (256,448,3)
525
+ 00037/0254 7 (256,448,3)
526
+ 00037/0269 7 (256,448,3)
527
+ 00037/0276 7 (256,448,3)
528
+ 00037/0279 7 (256,448,3)
529
+ 00037/0286 7 (256,448,3)
530
+ 00037/0345 7 (256,448,3)
531
+ 00037/0449 7 (256,448,3)
532
+ 00037/0450 7 (256,448,3)
533
+ 00037/0820 7 (256,448,3)
534
+ 00037/0824 7 (256,448,3)
535
+ 00037/0859 7 (256,448,3)
536
+ 00037/0899 7 (256,448,3)
537
+ 00037/0906 7 (256,448,3)
538
+ 00038/0535 7 (256,448,3)
539
+ 00038/0572 7 (256,448,3)
540
+ 00038/0675 7 (256,448,3)
541
+ 00038/0731 7 (256,448,3)
542
+ 00038/0732 7 (256,448,3)
543
+ 00038/0744 7 (256,448,3)
544
+ 00038/0755 7 (256,448,3)
545
+ 00039/0002 7 (256,448,3)
546
+ 00039/0013 7 (256,448,3)
547
+ 00039/0247 7 (256,448,3)
548
+ 00039/0489 7 (256,448,3)
549
+ 00039/0504 7 (256,448,3)
550
+ 00039/0558 7 (256,448,3)
551
+ 00039/0686 7 (256,448,3)
552
+ 00039/0727 7 (256,448,3)
553
+ 00039/0769 7 (256,448,3)
554
+ 00040/0081 7 (256,448,3)
555
+ 00040/0082 7 (256,448,3)
556
+ 00040/0402 7 (256,448,3)
557
+ 00040/0407 7 (256,448,3)
558
+ 00040/0408 7 (256,448,3)
559
+ 00040/0410 7 (256,448,3)
560
+ 00040/0411 7 (256,448,3)
561
+ 00040/0412 7 (256,448,3)
562
+ 00040/0413 7 (256,448,3)
563
+ 00040/0415 7 (256,448,3)
564
+ 00040/0421 7 (256,448,3)
565
+ 00040/0422 7 (256,448,3)
566
+ 00040/0426 7 (256,448,3)
567
+ 00040/0438 7 (256,448,3)
568
+ 00040/0439 7 (256,448,3)
569
+ 00040/0440 7 (256,448,3)
570
+ 00040/0443 7 (256,448,3)
571
+ 00040/0457 7 (256,448,3)
572
+ 00040/0459 7 (256,448,3)
573
+ 00040/0725 7 (256,448,3)
574
+ 00040/0727 7 (256,448,3)
575
+ 00040/0936 7 (256,448,3)
576
+ 00040/0959 7 (256,448,3)
577
+ 00040/0964 7 (256,448,3)
578
+ 00040/0968 7 (256,448,3)
579
+ 00040/0974 7 (256,448,3)
580
+ 00040/0978 7 (256,448,3)
581
+ 00040/0979 7 (256,448,3)
582
+ 00040/0989 7 (256,448,3)
583
+ 00040/0993 7 (256,448,3)
584
+ 00040/0994 7 (256,448,3)
585
+ 00040/0997 7 (256,448,3)
586
+ 00041/0001 7 (256,448,3)
587
+ 00041/0007 7 (256,448,3)
588
+ 00041/0019 7 (256,448,3)
589
+ 00041/0040 7 (256,448,3)
590
+ 00041/0350 7 (256,448,3)
591
+ 00041/0357 7 (256,448,3)
592
+ 00041/0393 7 (256,448,3)
593
+ 00041/0890 7 (256,448,3)
594
+ 00041/0909 7 (256,448,3)
595
+ 00041/0915 7 (256,448,3)
596
+ 00041/0933 7 (256,448,3)
597
+ 00042/0017 7 (256,448,3)
598
+ 00042/0332 7 (256,448,3)
599
+ 00042/0346 7 (256,448,3)
600
+ 00042/0350 7 (256,448,3)
601
+ 00042/0356 7 (256,448,3)
602
+ 00042/0382 7 (256,448,3)
603
+ 00042/0389 7 (256,448,3)
604
+ 00042/0539 7 (256,448,3)
605
+ 00042/0546 7 (256,448,3)
606
+ 00042/0550 7 (256,448,3)
607
+ 00042/0553 7 (256,448,3)
608
+ 00042/0555 7 (256,448,3)
609
+ 00042/0560 7 (256,448,3)
610
+ 00042/0570 7 (256,448,3)
611
+ 00043/0119 7 (256,448,3)
612
+ 00043/0122 7 (256,448,3)
613
+ 00043/0168 7 (256,448,3)
614
+ 00043/0274 7 (256,448,3)
615
+ 00043/0304 7 (256,448,3)
616
+ 00043/0731 7 (256,448,3)
617
+ 00043/0735 7 (256,448,3)
618
+ 00043/0739 7 (256,448,3)
619
+ 00043/0740 7 (256,448,3)
620
+ 00044/0212 7 (256,448,3)
621
+ 00044/0432 7 (256,448,3)
622
+ 00044/0934 7 (256,448,3)
623
+ 00044/0940 7 (256,448,3)
624
+ 00044/0987 7 (256,448,3)
625
+ 00045/0004 7 (256,448,3)
626
+ 00045/0009 7 (256,448,3)
627
+ 00045/0011 7 (256,448,3)
628
+ 00045/0019 7 (256,448,3)
629
+ 00045/0023 7 (256,448,3)
630
+ 00045/0289 7 (256,448,3)
631
+ 00045/0760 7 (256,448,3)
632
+ 00045/0779 7 (256,448,3)
633
+ 00045/0816 7 (256,448,3)
634
+ 00045/0820 7 (256,448,3)
635
+ 00046/0132 7 (256,448,3)
636
+ 00046/0350 7 (256,448,3)
637
+ 00046/0356 7 (256,448,3)
638
+ 00046/0357 7 (256,448,3)
639
+ 00046/0379 7 (256,448,3)
640
+ 00046/0410 7 (256,448,3)
641
+ 00046/0412 7 (256,448,3)
642
+ 00046/0481 7 (256,448,3)
643
+ 00046/0497 7 (256,448,3)
644
+ 00046/0510 7 (256,448,3)
645
+ 00046/0515 7 (256,448,3)
646
+ 00046/0529 7 (256,448,3)
647
+ 00046/0544 7 (256,448,3)
648
+ 00046/0545 7 (256,448,3)
649
+ 00046/0552 7 (256,448,3)
650
+ 00046/0559 7 (256,448,3)
651
+ 00046/0589 7 (256,448,3)
652
+ 00046/0642 7 (256,448,3)
653
+ 00046/0724 7 (256,448,3)
654
+ 00046/0758 7 (256,448,3)
655
+ 00046/0930 7 (256,448,3)
656
+ 00046/0953 7 (256,448,3)
657
+ 00047/0013 7 (256,448,3)
658
+ 00047/0014 7 (256,448,3)
659
+ 00047/0017 7 (256,448,3)
660
+ 00047/0076 7 (256,448,3)
661
+ 00047/0151 7 (256,448,3)
662
+ 00047/0797 7 (256,448,3)
663
+ 00048/0014 7 (256,448,3)
664
+ 00048/0021 7 (256,448,3)
665
+ 00048/0026 7 (256,448,3)
666
+ 00048/0030 7 (256,448,3)
667
+ 00048/0039 7 (256,448,3)
668
+ 00048/0045 7 (256,448,3)
669
+ 00048/0049 7 (256,448,3)
670
+ 00048/0145 7 (256,448,3)
671
+ 00048/0188 7 (256,448,3)
672
+ 00048/0302 7 (256,448,3)
673
+ 00048/0361 7 (256,448,3)
674
+ 00048/0664 7 (256,448,3)
675
+ 00048/0672 7 (256,448,3)
676
+ 00048/0681 7 (256,448,3)
677
+ 00048/0689 7 (256,448,3)
678
+ 00048/0690 7 (256,448,3)
679
+ 00048/0691 7 (256,448,3)
680
+ 00048/0711 7 (256,448,3)
681
+ 00049/0085 7 (256,448,3)
682
+ 00049/0810 7 (256,448,3)
683
+ 00049/0858 7 (256,448,3)
684
+ 00049/0865 7 (256,448,3)
685
+ 00049/0871 7 (256,448,3)
686
+ 00049/0903 7 (256,448,3)
687
+ 00049/0928 7 (256,448,3)
688
+ 00050/0092 7 (256,448,3)
689
+ 00050/0101 7 (256,448,3)
690
+ 00050/0108 7 (256,448,3)
691
+ 00050/0112 7 (256,448,3)
692
+ 00050/0120 7 (256,448,3)
693
+ 00050/0128 7 (256,448,3)
694
+ 00050/0383 7 (256,448,3)
695
+ 00050/0395 7 (256,448,3)
696
+ 00050/0405 7 (256,448,3)
697
+ 00050/0632 7 (256,448,3)
698
+ 00050/0648 7 (256,448,3)
699
+ 00050/0649 7 (256,448,3)
700
+ 00050/0659 7 (256,448,3)
701
+ 00050/0699 7 (256,448,3)
702
+ 00050/0708 7 (256,448,3)
703
+ 00050/0716 7 (256,448,3)
704
+ 00050/0758 7 (256,448,3)
705
+ 00050/0761 7 (256,448,3)
706
+ 00051/0572 7 (256,448,3)
707
+ 00052/0163 7 (256,448,3)
708
+ 00052/0242 7 (256,448,3)
709
+ 00052/0260 7 (256,448,3)
710
+ 00052/0322 7 (256,448,3)
711
+ 00052/0333 7 (256,448,3)
712
+ 00052/0806 7 (256,448,3)
713
+ 00052/0813 7 (256,448,3)
714
+ 00052/0821 7 (256,448,3)
715
+ 00052/0830 7 (256,448,3)
716
+ 00052/0914 7 (256,448,3)
717
+ 00052/0923 7 (256,448,3)
718
+ 00052/0959 7 (256,448,3)
719
+ 00053/0288 7 (256,448,3)
720
+ 00053/0290 7 (256,448,3)
721
+ 00053/0323 7 (256,448,3)
722
+ 00053/0337 7 (256,448,3)
723
+ 00053/0340 7 (256,448,3)
724
+ 00053/0437 7 (256,448,3)
725
+ 00053/0595 7 (256,448,3)
726
+ 00053/0739 7 (256,448,3)
727
+ 00053/0761 7 (256,448,3)
728
+ 00054/0014 7 (256,448,3)
729
+ 00054/0017 7 (256,448,3)
730
+ 00054/0178 7 (256,448,3)
731
+ 00054/0183 7 (256,448,3)
732
+ 00054/0196 7 (256,448,3)
733
+ 00054/0205 7 (256,448,3)
734
+ 00054/0214 7 (256,448,3)
735
+ 00054/0289 7 (256,448,3)
736
+ 00054/0453 7 (256,448,3)
737
+ 00054/0498 7 (256,448,3)
738
+ 00054/0502 7 (256,448,3)
739
+ 00054/0514 7 (256,448,3)
740
+ 00054/0773 7 (256,448,3)
741
+ 00055/0001 7 (256,448,3)
742
+ 00055/0115 7 (256,448,3)
743
+ 00055/0118 7 (256,448,3)
744
+ 00055/0171 7 (256,448,3)
745
+ 00055/0214 7 (256,448,3)
746
+ 00055/0354 7 (256,448,3)
747
+ 00055/0449 7 (256,448,3)
748
+ 00055/0473 7 (256,448,3)
749
+ 00055/0649 7 (256,448,3)
750
+ 00055/0800 7 (256,448,3)
751
+ 00055/0803 7 (256,448,3)
752
+ 00055/0990 7 (256,448,3)
753
+ 00056/0041 7 (256,448,3)
754
+ 00056/0120 7 (256,448,3)
755
+ 00056/0293 7 (256,448,3)
756
+ 00056/0357 7 (256,448,3)
757
+ 00056/0506 7 (256,448,3)
758
+ 00056/0561 7 (256,448,3)
759
+ 00056/0567 7 (256,448,3)
760
+ 00056/0575 7 (256,448,3)
761
+ 00057/0175 7 (256,448,3)
762
+ 00057/0495 7 (256,448,3)
763
+ 00057/0498 7 (256,448,3)
764
+ 00057/0506 7 (256,448,3)
765
+ 00057/0612 7 (256,448,3)
766
+ 00057/0620 7 (256,448,3)
767
+ 00057/0623 7 (256,448,3)
768
+ 00057/0635 7 (256,448,3)
769
+ 00057/0773 7 (256,448,3)
770
+ 00057/0778 7 (256,448,3)
771
+ 00057/0867 7 (256,448,3)
772
+ 00057/0976 7 (256,448,3)
773
+ 00057/0980 7 (256,448,3)
774
+ 00057/0985 7 (256,448,3)
775
+ 00057/0992 7 (256,448,3)
776
+ 00058/0009 7 (256,448,3)
777
+ 00058/0076 7 (256,448,3)
778
+ 00058/0078 7 (256,448,3)
779
+ 00058/0279 7 (256,448,3)
780
+ 00058/0283 7 (256,448,3)
781
+ 00058/0286 7 (256,448,3)
782
+ 00058/0350 7 (256,448,3)
783
+ 00058/0380 7 (256,448,3)
784
+ 00061/0132 7 (256,448,3)
785
+ 00061/0141 7 (256,448,3)
786
+ 00061/0156 7 (256,448,3)
787
+ 00061/0159 7 (256,448,3)
788
+ 00061/0168 7 (256,448,3)
789
+ 00061/0170 7 (256,448,3)
790
+ 00061/0186 7 (256,448,3)
791
+ 00061/0219 7 (256,448,3)
792
+ 00061/0227 7 (256,448,3)
793
+ 00061/0238 7 (256,448,3)
794
+ 00061/0256 7 (256,448,3)
795
+ 00061/0303 7 (256,448,3)
796
+ 00061/0312 7 (256,448,3)
797
+ 00061/0313 7 (256,448,3)
798
+ 00061/0325 7 (256,448,3)
799
+ 00061/0367 7 (256,448,3)
800
+ 00061/0369 7 (256,448,3)
801
+ 00061/0387 7 (256,448,3)
802
+ 00061/0396 7 (256,448,3)
803
+ 00061/0486 7 (256,448,3)
804
+ 00061/0895 7 (256,448,3)
805
+ 00061/0897 7 (256,448,3)
806
+ 00062/0846 7 (256,448,3)
807
+ 00063/0156 7 (256,448,3)
808
+ 00063/0184 7 (256,448,3)
809
+ 00063/0191 7 (256,448,3)
810
+ 00063/0334 7 (256,448,3)
811
+ 00063/0350 7 (256,448,3)
812
+ 00063/0499 7 (256,448,3)
813
+ 00063/0878 7 (256,448,3)
814
+ 00064/0004 7 (256,448,3)
815
+ 00064/0264 7 (256,448,3)
816
+ 00064/0735 7 (256,448,3)
817
+ 00064/0738 7 (256,448,3)
818
+ 00065/0105 7 (256,448,3)
819
+ 00065/0169 7 (256,448,3)
820
+ 00065/0305 7 (256,448,3)
821
+ 00065/0324 7 (256,448,3)
822
+ 00065/0353 7 (256,448,3)
823
+ 00065/0520 7 (256,448,3)
824
+ 00065/0533 7 (256,448,3)
825
+ 00065/0545 7 (256,448,3)
826
+ 00065/0551 7 (256,448,3)
827
+ 00065/0568 7 (256,448,3)
828
+ 00065/0603 7 (256,448,3)
829
+ 00065/0884 7 (256,448,3)
830
+ 00065/0988 7 (256,448,3)
831
+ 00066/0002 7 (256,448,3)
832
+ 00066/0011 7 (256,448,3)
833
+ 00066/0031 7 (256,448,3)
834
+ 00066/0037 7 (256,448,3)
835
+ 00066/0136 7 (256,448,3)
836
+ 00066/0137 7 (256,448,3)
837
+ 00066/0150 7 (256,448,3)
838
+ 00066/0166 7 (256,448,3)
839
+ 00066/0178 7 (256,448,3)
840
+ 00066/0357 7 (256,448,3)
841
+ 00066/0428 7 (256,448,3)
842
+ 00066/0483 7 (256,448,3)
843
+ 00066/0600 7 (256,448,3)
844
+ 00066/0863 7 (256,448,3)
845
+ 00066/0873 7 (256,448,3)
846
+ 00066/0875 7 (256,448,3)
847
+ 00066/0899 7 (256,448,3)
848
+ 00067/0020 7 (256,448,3)
849
+ 00067/0025 7 (256,448,3)
850
+ 00067/0132 7 (256,448,3)
851
+ 00067/0492 7 (256,448,3)
852
+ 00067/0726 7 (256,448,3)
853
+ 00067/0734 7 (256,448,3)
854
+ 00067/0744 7 (256,448,3)
855
+ 00067/0754 7 (256,448,3)
856
+ 00067/0779 7 (256,448,3)
857
+ 00068/0078 7 (256,448,3)
858
+ 00068/0083 7 (256,448,3)
859
+ 00068/0113 7 (256,448,3)
860
+ 00068/0117 7 (256,448,3)
861
+ 00068/0121 7 (256,448,3)
862
+ 00068/0206 7 (256,448,3)
863
+ 00068/0261 7 (256,448,3)
864
+ 00068/0321 7 (256,448,3)
865
+ 00068/0354 7 (256,448,3)
866
+ 00068/0380 7 (256,448,3)
867
+ 00068/0419 7 (256,448,3)
868
+ 00068/0547 7 (256,448,3)
869
+ 00068/0561 7 (256,448,3)
870
+ 00068/0565 7 (256,448,3)
871
+ 00068/0583 7 (256,448,3)
872
+ 00068/0599 7 (256,448,3)
873
+ 00068/0739 7 (256,448,3)
874
+ 00068/0743 7 (256,448,3)
875
+ 00068/0754 7 (256,448,3)
876
+ 00068/0812 7 (256,448,3)
877
+ 00069/0178 7 (256,448,3)
878
+ 00070/0025 7 (256,448,3)
879
+ 00070/0030 7 (256,448,3)
880
+ 00070/0036 7 (256,448,3)
881
+ 00070/0042 7 (256,448,3)
882
+ 00070/0078 7 (256,448,3)
883
+ 00070/0079 7 (256,448,3)
884
+ 00070/0362 7 (256,448,3)
885
+ 00071/0195 7 (256,448,3)
886
+ 00071/0210 7 (256,448,3)
887
+ 00071/0211 7 (256,448,3)
888
+ 00071/0221 7 (256,448,3)
889
+ 00071/0352 7 (256,448,3)
890
+ 00071/0354 7 (256,448,3)
891
+ 00071/0366 7 (256,448,3)
892
+ 00071/0454 7 (256,448,3)
893
+ 00071/0464 7 (256,448,3)
894
+ 00071/0487 7 (256,448,3)
895
+ 00071/0502 7 (256,448,3)
896
+ 00071/0561 7 (256,448,3)
897
+ 00071/0676 7 (256,448,3)
898
+ 00071/0808 7 (256,448,3)
899
+ 00071/0813 7 (256,448,3)
900
+ 00071/0836 7 (256,448,3)
901
+ 00072/0286 7 (256,448,3)
902
+ 00072/0290 7 (256,448,3)
903
+ 00072/0298 7 (256,448,3)
904
+ 00072/0302 7 (256,448,3)
905
+ 00072/0333 7 (256,448,3)
906
+ 00072/0590 7 (256,448,3)
907
+ 00072/0793 7 (256,448,3)
908
+ 00072/0803 7 (256,448,3)
909
+ 00072/0833 7 (256,448,3)
910
+ 00073/0049 7 (256,448,3)
911
+ 00073/0050 7 (256,448,3)
912
+ 00073/0388 7 (256,448,3)
913
+ 00073/0480 7 (256,448,3)
914
+ 00073/0485 7 (256,448,3)
915
+ 00073/0611 7 (256,448,3)
916
+ 00073/0616 7 (256,448,3)
917
+ 00073/0714 7 (256,448,3)
918
+ 00073/0724 7 (256,448,3)
919
+ 00073/0730 7 (256,448,3)
920
+ 00074/0034 7 (256,448,3)
921
+ 00074/0228 7 (256,448,3)
922
+ 00074/0239 7 (256,448,3)
923
+ 00074/0275 7 (256,448,3)
924
+ 00074/0527 7 (256,448,3)
925
+ 00074/0620 7 (256,448,3)
926
+ 00074/0764 7 (256,448,3)
927
+ 00074/0849 7 (256,448,3)
928
+ 00074/0893 7 (256,448,3)
929
+ 00075/0333 7 (256,448,3)
930
+ 00075/0339 7 (256,448,3)
931
+ 00075/0347 7 (256,448,3)
932
+ 00075/0399 7 (256,448,3)
933
+ 00075/0478 7 (256,448,3)
934
+ 00075/0494 7 (256,448,3)
935
+ 00075/0678 7 (256,448,3)
936
+ 00075/0688 7 (256,448,3)
937
+ 00075/0706 7 (256,448,3)
938
+ 00075/0709 7 (256,448,3)
939
+ 00075/0748 7 (256,448,3)
940
+ 00075/0769 7 (256,448,3)
941
+ 00075/0777 7 (256,448,3)
942
+ 00075/0781 7 (256,448,3)
943
+ 00076/0151 7 (256,448,3)
944
+ 00076/0159 7 (256,448,3)
945
+ 00076/0164 7 (256,448,3)
946
+ 00076/0265 7 (256,448,3)
947
+ 00076/0269 7 (256,448,3)
948
+ 00076/0433 7 (256,448,3)
949
+ 00076/0813 7 (256,448,3)
950
+ 00076/0817 7 (256,448,3)
951
+ 00076/0818 7 (256,448,3)
952
+ 00076/0827 7 (256,448,3)
953
+ 00076/0874 7 (256,448,3)
954
+ 00076/0880 7 (256,448,3)
955
+ 00076/0891 7 (256,448,3)
956
+ 00076/0894 7 (256,448,3)
957
+ 00076/0909 7 (256,448,3)
958
+ 00076/0913 7 (256,448,3)
959
+ 00076/0926 7 (256,448,3)
960
+ 00076/0962 7 (256,448,3)
961
+ 00076/0973 7 (256,448,3)
962
+ 00076/0986 7 (256,448,3)
963
+ 00077/0617 7 (256,448,3)
964
+ 00077/0623 7 (256,448,3)
965
+ 00077/0628 7 (256,448,3)
966
+ 00077/0629 7 (256,448,3)
967
+ 00077/0631 7 (256,448,3)
968
+ 00077/0639 7 (256,448,3)
969
+ 00077/0982 7 (256,448,3)
970
+ 00077/0984 7 (256,448,3)
971
+ 00077/0995 7 (256,448,3)
972
+ 00077/0998 7 (256,448,3)
973
+ 00078/0001 7 (256,448,3)
974
+ 00078/0015 7 (256,448,3)
975
+ 00078/0157 7 (256,448,3)
976
+ 00078/0161 7 (256,448,3)
977
+ 00078/0175 7 (256,448,3)
978
+ 00078/0178 7 (256,448,3)
979
+ 00078/0189 7 (256,448,3)
980
+ 00078/0192 7 (256,448,3)
981
+ 00078/0229 7 (256,448,3)
982
+ 00078/0237 7 (256,448,3)
983
+ 00078/0241 7 (256,448,3)
984
+ 00078/0249 7 (256,448,3)
985
+ 00078/0251 7 (256,448,3)
986
+ 00078/0254 7 (256,448,3)
987
+ 00078/0258 7 (256,448,3)
988
+ 00078/0311 7 (256,448,3)
989
+ 00078/0603 7 (256,448,3)
990
+ 00078/0607 7 (256,448,3)
991
+ 00078/0824 7 (256,448,3)
992
+ 00079/0045 7 (256,448,3)
993
+ 00079/0048 7 (256,448,3)
994
+ 00079/0054 7 (256,448,3)
995
+ 00080/0050 7 (256,448,3)
996
+ 00080/0488 7 (256,448,3)
997
+ 00080/0494 7 (256,448,3)
998
+ 00080/0496 7 (256,448,3)
999
+ 00080/0499 7 (256,448,3)
1000
+ 00080/0502 7 (256,448,3)
1001
+ 00080/0510 7 (256,448,3)
1002
+ 00080/0534 7 (256,448,3)
1003
+ 00080/0558 7 (256,448,3)
1004
+ 00080/0571 7 (256,448,3)
1005
+ 00080/0709 7 (256,448,3)
1006
+ 00080/0882 7 (256,448,3)
1007
+ 00081/0322 7 (256,448,3)
1008
+ 00081/0428 7 (256,448,3)
1009
+ 00081/0700 7 (256,448,3)
1010
+ 00081/0706 7 (256,448,3)
1011
+ 00081/0707 7 (256,448,3)
1012
+ 00081/0937 7 (256,448,3)
1013
+ 00082/0021 7 (256,448,3)
1014
+ 00082/0424 7 (256,448,3)
1015
+ 00082/0794 7 (256,448,3)
1016
+ 00082/0807 7 (256,448,3)
1017
+ 00082/0810 7 (256,448,3)
1018
+ 00082/0824 7 (256,448,3)
1019
+ 00083/0129 7 (256,448,3)
1020
+ 00083/0131 7 (256,448,3)
1021
+ 00083/0249 7 (256,448,3)
1022
+ 00083/0250 7 (256,448,3)
1023
+ 00083/0656 7 (256,448,3)
1024
+ 00083/0812 7 (256,448,3)
1025
+ 00083/0819 7 (256,448,3)
1026
+ 00083/0824 7 (256,448,3)
1027
+ 00083/0827 7 (256,448,3)
1028
+ 00083/0841 7 (256,448,3)
1029
+ 00083/0963 7 (256,448,3)
1030
+ 00084/0047 7 (256,448,3)
1031
+ 00084/0319 7 (256,448,3)
1032
+ 00084/0334 7 (256,448,3)
1033
+ 00084/0363 7 (256,448,3)
1034
+ 00084/0493 7 (256,448,3)
1035
+ 00084/0655 7 (256,448,3)
1036
+ 00084/0752 7 (256,448,3)
1037
+ 00084/0813 7 (256,448,3)
1038
+ 00084/0886 7 (256,448,3)
1039
+ 00084/0948 7 (256,448,3)
1040
+ 00084/0976 7 (256,448,3)
1041
+ 00085/0512 7 (256,448,3)
1042
+ 00085/0641 7 (256,448,3)
1043
+ 00085/0653 7 (256,448,3)
1044
+ 00085/0655 7 (256,448,3)
1045
+ 00085/0697 7 (256,448,3)
1046
+ 00085/0698 7 (256,448,3)
1047
+ 00085/0700 7 (256,448,3)
1048
+ 00085/0703 7 (256,448,3)
1049
+ 00085/0705 7 (256,448,3)
1050
+ 00085/0709 7 (256,448,3)
1051
+ 00085/0713 7 (256,448,3)
1052
+ 00085/0739 7 (256,448,3)
1053
+ 00085/0750 7 (256,448,3)
1054
+ 00085/0763 7 (256,448,3)
1055
+ 00085/0765 7 (256,448,3)
1056
+ 00085/0769 7 (256,448,3)
1057
+ 00085/0863 7 (256,448,3)
1058
+ 00085/0868 7 (256,448,3)
1059
+ 00085/0927 7 (256,448,3)
1060
+ 00085/0936 7 (256,448,3)
1061
+ 00085/0965 7 (256,448,3)
1062
+ 00085/0969 7 (256,448,3)
1063
+ 00085/0974 7 (256,448,3)
1064
+ 00085/0981 7 (256,448,3)
1065
+ 00085/0982 7 (256,448,3)
1066
+ 00085/1000 7 (256,448,3)
1067
+ 00086/0003 7 (256,448,3)
1068
+ 00086/0009 7 (256,448,3)
1069
+ 00086/0011 7 (256,448,3)
1070
+ 00086/0028 7 (256,448,3)
1071
+ 00086/0032 7 (256,448,3)
1072
+ 00086/0034 7 (256,448,3)
1073
+ 00086/0035 7 (256,448,3)
1074
+ 00086/0042 7 (256,448,3)
1075
+ 00086/0064 7 (256,448,3)
1076
+ 00086/0066 7 (256,448,3)
1077
+ 00086/0095 7 (256,448,3)
1078
+ 00086/0099 7 (256,448,3)
1079
+ 00086/0101 7 (256,448,3)
1080
+ 00086/0104 7 (256,448,3)
1081
+ 00086/0115 7 (256,448,3)
1082
+ 00086/0116 7 (256,448,3)
1083
+ 00086/0284 7 (256,448,3)
1084
+ 00086/0291 7 (256,448,3)
1085
+ 00086/0295 7 (256,448,3)
1086
+ 00086/0302 7 (256,448,3)
1087
+ 00086/0318 7 (256,448,3)
1088
+ 00086/0666 7 (256,448,3)
1089
+ 00086/0797 7 (256,448,3)
1090
+ 00086/0851 7 (256,448,3)
1091
+ 00086/0855 7 (256,448,3)
1092
+ 00086/0874 7 (256,448,3)
1093
+ 00086/0878 7 (256,448,3)
1094
+ 00086/0881 7 (256,448,3)
1095
+ 00086/0883 7 (256,448,3)
1096
+ 00086/0896 7 (256,448,3)
1097
+ 00086/0899 7 (256,448,3)
1098
+ 00086/0903 7 (256,448,3)
1099
+ 00086/0989 7 (256,448,3)
1100
+ 00087/0008 7 (256,448,3)
1101
+ 00087/0429 7 (256,448,3)
1102
+ 00087/0511 7 (256,448,3)
1103
+ 00088/0241 7 (256,448,3)
1104
+ 00088/0319 7 (256,448,3)
1105
+ 00088/0323 7 (256,448,3)
1106
+ 00088/0411 7 (256,448,3)
1107
+ 00088/0427 7 (256,448,3)
1108
+ 00088/0452 7 (256,448,3)
1109
+ 00088/0463 7 (256,448,3)
1110
+ 00088/0476 7 (256,448,3)
1111
+ 00088/0496 7 (256,448,3)
1112
+ 00088/0559 7 (256,448,3)
1113
+ 00089/0058 7 (256,448,3)
1114
+ 00089/0061 7 (256,448,3)
1115
+ 00089/0069 7 (256,448,3)
1116
+ 00089/0077 7 (256,448,3)
1117
+ 00089/0096 7 (256,448,3)
1118
+ 00089/0099 7 (256,448,3)
1119
+ 00089/0100 7 (256,448,3)
1120
+ 00089/0211 7 (256,448,3)
1121
+ 00089/0380 7 (256,448,3)
1122
+ 00089/0381 7 (256,448,3)
1123
+ 00089/0384 7 (256,448,3)
1124
+ 00089/0390 7 (256,448,3)
1125
+ 00089/0393 7 (256,448,3)
1126
+ 00089/0394 7 (256,448,3)
1127
+ 00089/0395 7 (256,448,3)
1128
+ 00089/0406 7 (256,448,3)
1129
+ 00089/0410 7 (256,448,3)
1130
+ 00089/0412 7 (256,448,3)
1131
+ 00089/0703 7 (256,448,3)
1132
+ 00089/0729 7 (256,448,3)
1133
+ 00089/0930 7 (256,448,3)
1134
+ 00089/0952 7 (256,448,3)
1135
+ 00090/0062 7 (256,448,3)
1136
+ 00090/0101 7 (256,448,3)
1137
+ 00090/0213 7 (256,448,3)
1138
+ 00090/0216 7 (256,448,3)
1139
+ 00090/0268 7 (256,448,3)
1140
+ 00090/0406 7 (256,448,3)
1141
+ 00090/0411 7 (256,448,3)
1142
+ 00090/0442 7 (256,448,3)
1143
+ 00090/0535 7 (256,448,3)
1144
+ 00090/0542 7 (256,448,3)
1145
+ 00090/0571 7 (256,448,3)
1146
+ 00090/0934 7 (256,448,3)
1147
+ 00090/0938 7 (256,448,3)
1148
+ 00090/0947 7 (256,448,3)
1149
+ 00091/0066 7 (256,448,3)
1150
+ 00091/0448 7 (256,448,3)
1151
+ 00091/0451 7 (256,448,3)
1152
+ 00091/0454 7 (256,448,3)
1153
+ 00091/0457 7 (256,448,3)
1154
+ 00091/0467 7 (256,448,3)
1155
+ 00091/0470 7 (256,448,3)
1156
+ 00091/0477 7 (256,448,3)
1157
+ 00091/0583 7 (256,448,3)
1158
+ 00091/0981 7 (256,448,3)
1159
+ 00091/0994 7 (256,448,3)
1160
+ 00092/0112 7 (256,448,3)
1161
+ 00092/0119 7 (256,448,3)
1162
+ 00092/0129 7 (256,448,3)
1163
+ 00092/0146 7 (256,448,3)
1164
+ 00092/0149 7 (256,448,3)
1165
+ 00092/0608 7 (256,448,3)
1166
+ 00092/0643 7 (256,448,3)
1167
+ 00092/0646 7 (256,448,3)
1168
+ 00092/0766 7 (256,448,3)
1169
+ 00092/0768 7 (256,448,3)
1170
+ 00092/0779 7 (256,448,3)
1171
+ 00093/0081 7 (256,448,3)
1172
+ 00093/0085 7 (256,448,3)
1173
+ 00093/0135 7 (256,448,3)
1174
+ 00093/0241 7 (256,448,3)
1175
+ 00093/0277 7 (256,448,3)
1176
+ 00093/0283 7 (256,448,3)
1177
+ 00093/0320 7 (256,448,3)
1178
+ 00093/0598 7 (256,448,3)
1179
+ 00094/0159 7 (256,448,3)
1180
+ 00094/0253 7 (256,448,3)
1181
+ 00094/0265 7 (256,448,3)
1182
+ 00094/0267 7 (256,448,3)
1183
+ 00094/0269 7 (256,448,3)
1184
+ 00094/0281 7 (256,448,3)
1185
+ 00094/0293 7 (256,448,3)
1186
+ 00094/0404 7 (256,448,3)
1187
+ 00094/0593 7 (256,448,3)
1188
+ 00094/0612 7 (256,448,3)
1189
+ 00094/0638 7 (256,448,3)
1190
+ 00094/0656 7 (256,448,3)
1191
+ 00094/0668 7 (256,448,3)
1192
+ 00094/0786 7 (256,448,3)
1193
+ 00094/0870 7 (256,448,3)
1194
+ 00094/0897 7 (256,448,3)
1195
+ 00094/0900 7 (256,448,3)
1196
+ 00094/0944 7 (256,448,3)
1197
+ 00094/0946 7 (256,448,3)
1198
+ 00094/0952 7 (256,448,3)
1199
+ 00094/0969 7 (256,448,3)
1200
+ 00094/0973 7 (256,448,3)
1201
+ 00094/0981 7 (256,448,3)
1202
+ 00095/0088 7 (256,448,3)
1203
+ 00095/0125 7 (256,448,3)
1204
+ 00095/0130 7 (256,448,3)
1205
+ 00095/0142 7 (256,448,3)
1206
+ 00095/0151 7 (256,448,3)
1207
+ 00095/0180 7 (256,448,3)
1208
+ 00095/0192 7 (256,448,3)
1209
+ 00095/0194 7 (256,448,3)
1210
+ 00095/0195 7 (256,448,3)
1211
+ 00095/0204 7 (256,448,3)
1212
+ 00095/0245 7 (256,448,3)
1213
+ 00095/0315 7 (256,448,3)
1214
+ 00095/0321 7 (256,448,3)
1215
+ 00095/0324 7 (256,448,3)
1216
+ 00095/0327 7 (256,448,3)
1217
+ 00095/0730 7 (256,448,3)
1218
+ 00095/0731 7 (256,448,3)
1219
+ 00095/0741 7 (256,448,3)
1220
+ 00095/0948 7 (256,448,3)
1221
+ 00096/0407 7 (256,448,3)
1222
+ 00096/0420 7 (256,448,3)
1223
+ 00096/0435 7 (256,448,3)
1224
+ 00096/0682 7 (256,448,3)
1225
+ 00096/0865 7 (256,448,3)
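Each line of these meta_info manifests encodes a Vimeo90K septuplet key, its frame count, and the per-frame shape. A minimal sketch of how such a line can be parsed (parse_meta_info is an illustrative name and return type, not an API defined in this repo):

# Minimal sketch of a parser for meta_info lines such as
# "00003/0107 7 (256,448,3)": key, frame count, (H, W, C) shape.
from typing import List, Tuple

def parse_meta_info(path: str) -> List[Tuple[str, int, Tuple[int, int, int]]]:
    entries = []
    with open(path, 'r') as fin:
        for line in fin:
            line = line.strip()
            if not line:
                continue  # skip blank lines
            key, num_frames, shape = line.split(' ')
            h, w, c = (int(v) for v in shape.strip('()').split(','))
            entries.append((key, int(num_frames), (h, w, c)))
    return entries

# e.g. a line "00003/0107 7 (256,448,3)" yields ('00003/0107', 7, (256, 448, 3))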
NAFNet/basicsr/data/meta_info/meta_info_Vimeo90K_test_medium_GT.txt ADDED
The diff for this file is too large to render. See raw diff
 
NAFNet/basicsr/data/meta_info/meta_info_Vimeo90K_test_slow_GT.txt ADDED
@@ -0,0 +1,1613 @@
[entries 1 through 749 of meta_info_Vimeo90K_test_slow_GT.txt, one septuplet per line in the same format, running from 00001/0266 7 (256,448,3) to 00045/0810 7 (256,448,3); every entry lists 7 frames of shape (256,448,3). The diff for this file is too large to render in full. See raw diff.]
750
+ 00045/0826 7 (256,448,3)
751
+ 00045/0849 7 (256,448,3)
752
+ 00045/0946 7 (256,448,3)
753
+ 00045/0987 7 (256,448,3)
754
+ 00045/0990 7 (256,448,3)
755
+ 00046/0104 7 (256,448,3)
756
+ 00046/0477 7 (256,448,3)
757
+ 00046/0490 7 (256,448,3)
758
+ 00046/0491 7 (256,448,3)
759
+ 00046/0509 7 (256,448,3)
760
+ 00046/0513 7 (256,448,3)
761
+ 00046/0603 7 (256,448,3)
762
+ 00046/0723 7 (256,448,3)
763
+ 00046/0744 7 (256,448,3)
764
+ 00046/0746 7 (256,448,3)
765
+ 00046/0750 7 (256,448,3)
766
+ 00046/0852 7 (256,448,3)
767
+ 00046/0927 7 (256,448,3)
768
+ 00046/0928 7 (256,448,3)
769
+ 00046/0929 7 (256,448,3)
770
+ 00046/0931 7 (256,448,3)
771
+ 00046/0936 7 (256,448,3)
772
+ 00046/0939 7 (256,448,3)
773
+ 00046/0947 7 (256,448,3)
774
+ 00046/0948 7 (256,448,3)
775
+ 00046/0950 7 (256,448,3)
776
+ 00046/0955 7 (256,448,3)
777
+ 00046/0961 7 (256,448,3)
778
+ 00047/0023 7 (256,448,3)
779
+ 00047/0029 7 (256,448,3)
780
+ 00047/0035 7 (256,448,3)
781
+ 00047/0058 7 (256,448,3)
782
+ 00047/0061 7 (256,448,3)
783
+ 00047/0065 7 (256,448,3)
784
+ 00047/0068 7 (256,448,3)
785
+ 00047/0072 7 (256,448,3)
786
+ 00047/0074 7 (256,448,3)
787
+ 00047/0148 7 (256,448,3)
788
+ 00047/0594 7 (256,448,3)
789
+ 00047/0782 7 (256,448,3)
790
+ 00047/0787 7 (256,448,3)
791
+ 00047/0860 7 (256,448,3)
792
+ 00047/0889 7 (256,448,3)
793
+ 00047/0893 7 (256,448,3)
794
+ 00047/0894 7 (256,448,3)
795
+ 00047/0902 7 (256,448,3)
796
+ 00047/0975 7 (256,448,3)
797
+ 00047/0995 7 (256,448,3)
798
+ 00048/0033 7 (256,448,3)
799
+ 00048/0113 7 (256,448,3)
800
+ 00048/0115 7 (256,448,3)
801
+ 00048/0120 7 (256,448,3)
802
+ 00048/0129 7 (256,448,3)
803
+ 00048/0136 7 (256,448,3)
804
+ 00048/0327 7 (256,448,3)
805
+ 00048/0329 7 (256,448,3)
806
+ 00048/0341 7 (256,448,3)
807
+ 00048/0343 7 (256,448,3)
808
+ 00048/0345 7 (256,448,3)
809
+ 00048/0346 7 (256,448,3)
810
+ 00048/0355 7 (256,448,3)
811
+ 00048/0359 7 (256,448,3)
812
+ 00048/0363 7 (256,448,3)
813
+ 00048/0378 7 (256,448,3)
814
+ 00048/0386 7 (256,448,3)
815
+ 00048/0387 7 (256,448,3)
816
+ 00048/0388 7 (256,448,3)
817
+ 00048/0428 7 (256,448,3)
818
+ 00048/0439 7 (256,448,3)
819
+ 00048/0507 7 (256,448,3)
820
+ 00048/0510 7 (256,448,3)
821
+ 00048/0512 7 (256,448,3)
822
+ 00048/0514 7 (256,448,3)
823
+ 00048/0539 7 (256,448,3)
824
+ 00048/0542 7 (256,448,3)
825
+ 00048/0544 7 (256,448,3)
826
+ 00048/0631 7 (256,448,3)
827
+ 00048/0632 7 (256,448,3)
828
+ 00048/0636 7 (256,448,3)
829
+ 00048/0640 7 (256,448,3)
830
+ 00048/0644 7 (256,448,3)
831
+ 00048/0653 7 (256,448,3)
832
+ 00048/0655 7 (256,448,3)
833
+ 00048/0658 7 (256,448,3)
834
+ 00048/0667 7 (256,448,3)
835
+ 00048/0688 7 (256,448,3)
836
+ 00048/0708 7 (256,448,3)
837
+ 00049/0005 7 (256,448,3)
838
+ 00049/0074 7 (256,448,3)
839
+ 00049/0077 7 (256,448,3)
840
+ 00049/0084 7 (256,448,3)
841
+ 00049/0516 7 (256,448,3)
842
+ 00049/0800 7 (256,448,3)
843
+ 00049/0900 7 (256,448,3)
844
+ 00050/0607 7 (256,448,3)
845
+ 00050/0661 7 (256,448,3)
846
+ 00050/0665 7 (256,448,3)
847
+ 00050/0685 7 (256,448,3)
848
+ 00050/0711 7 (256,448,3)
849
+ 00051/0068 7 (256,448,3)
850
+ 00051/0069 7 (256,448,3)
851
+ 00051/0076 7 (256,448,3)
852
+ 00051/0569 7 (256,448,3)
853
+ 00051/0801 7 (256,448,3)
854
+ 00051/0927 7 (256,448,3)
855
+ 00051/0945 7 (256,448,3)
856
+ 00051/0952 7 (256,448,3)
857
+ 00051/0976 7 (256,448,3)
858
+ 00051/0985 7 (256,448,3)
859
+ 00052/0012 7 (256,448,3)
860
+ 00052/0015 7 (256,448,3)
861
+ 00052/0052 7 (256,448,3)
862
+ 00052/0056 7 (256,448,3)
863
+ 00052/0060 7 (256,448,3)
864
+ 00052/0157 7 (256,448,3)
865
+ 00052/0265 7 (256,448,3)
866
+ 00052/0788 7 (256,448,3)
867
+ 00052/0790 7 (256,448,3)
868
+ 00052/0793 7 (256,448,3)
869
+ 00052/0816 7 (256,448,3)
870
+ 00052/0824 7 (256,448,3)
871
+ 00052/0918 7 (256,448,3)
872
+ 00052/0933 7 (256,448,3)
873
+ 00052/0947 7 (256,448,3)
874
+ 00053/0232 7 (256,448,3)
875
+ 00053/0277 7 (256,448,3)
876
+ 00053/0362 7 (256,448,3)
877
+ 00053/0577 7 (256,448,3)
878
+ 00053/0609 7 (256,448,3)
879
+ 00053/0612 7 (256,448,3)
880
+ 00053/0628 7 (256,448,3)
881
+ 00053/0629 7 (256,448,3)
882
+ 00053/0633 7 (256,448,3)
883
+ 00053/0659 7 (256,448,3)
884
+ 00053/0667 7 (256,448,3)
885
+ 00053/0671 7 (256,448,3)
886
+ 00053/0797 7 (256,448,3)
887
+ 00053/0804 7 (256,448,3)
888
+ 00053/0807 7 (256,448,3)
889
+ 00053/0952 7 (256,448,3)
890
+ 00053/0970 7 (256,448,3)
891
+ 00053/0981 7 (256,448,3)
892
+ 00053/0999 7 (256,448,3)
893
+ 00054/0003 7 (256,448,3)
894
+ 00054/0013 7 (256,448,3)
895
+ 00054/0020 7 (256,448,3)
896
+ 00054/0022 7 (256,448,3)
897
+ 00054/0023 7 (256,448,3)
898
+ 00054/0044 7 (256,448,3)
899
+ 00054/0051 7 (256,448,3)
900
+ 00054/0063 7 (256,448,3)
901
+ 00054/0065 7 (256,448,3)
902
+ 00054/0145 7 (256,448,3)
903
+ 00054/0153 7 (256,448,3)
904
+ 00054/0203 7 (256,448,3)
905
+ 00054/0325 7 (256,448,3)
906
+ 00054/0445 7 (256,448,3)
907
+ 00054/0448 7 (256,448,3)
908
+ 00054/0456 7 (256,448,3)
909
+ 00054/0457 7 (256,448,3)
910
+ 00054/0519 7 (256,448,3)
911
+ 00054/0524 7 (256,448,3)
912
+ 00054/0530 7 (256,448,3)
913
+ 00054/0532 7 (256,448,3)
914
+ 00054/0535 7 (256,448,3)
915
+ 00054/0574 7 (256,448,3)
916
+ 00054/0760 7 (256,448,3)
917
+ 00054/0767 7 (256,448,3)
918
+ 00054/0837 7 (256,448,3)
919
+ 00055/0011 7 (256,448,3)
920
+ 00055/0109 7 (256,448,3)
921
+ 00055/0111 7 (256,448,3)
922
+ 00055/0117 7 (256,448,3)
923
+ 00055/0119 7 (256,448,3)
924
+ 00055/0182 7 (256,448,3)
925
+ 00055/0192 7 (256,448,3)
926
+ 00055/0193 7 (256,448,3)
927
+ 00055/0200 7 (256,448,3)
928
+ 00055/0204 7 (256,448,3)
929
+ 00055/0207 7 (256,448,3)
930
+ 00055/0212 7 (256,448,3)
931
+ 00055/0213 7 (256,448,3)
932
+ 00055/0348 7 (256,448,3)
933
+ 00055/0423 7 (256,448,3)
934
+ 00055/0427 7 (256,448,3)
935
+ 00055/0456 7 (256,448,3)
936
+ 00055/0489 7 (256,448,3)
937
+ 00055/0689 7 (256,448,3)
938
+ 00055/0753 7 (256,448,3)
939
+ 00055/0802 7 (256,448,3)
940
+ 00055/0844 7 (256,448,3)
941
+ 00055/0850 7 (256,448,3)
942
+ 00055/0982 7 (256,448,3)
943
+ 00055/0993 7 (256,448,3)
944
+ 00056/0113 7 (256,448,3)
945
+ 00056/0148 7 (256,448,3)
946
+ 00056/0151 7 (256,448,3)
947
+ 00056/0316 7 (256,448,3)
948
+ 00056/0379 7 (256,448,3)
949
+ 00056/0380 7 (256,448,3)
950
+ 00056/0385 7 (256,448,3)
951
+ 00056/0505 7 (256,448,3)
952
+ 00056/0579 7 (256,448,3)
953
+ 00057/0254 7 (256,448,3)
954
+ 00057/0264 7 (256,448,3)
955
+ 00057/0272 7 (256,448,3)
956
+ 00057/0403 7 (256,448,3)
957
+ 00057/0501 7 (256,448,3)
958
+ 00057/0503 7 (256,448,3)
959
+ 00057/0884 7 (256,448,3)
960
+ 00058/0026 7 (256,448,3)
961
+ 00058/0029 7 (256,448,3)
962
+ 00058/0104 7 (256,448,3)
963
+ 00058/0124 7 (256,448,3)
964
+ 00058/0162 7 (256,448,3)
965
+ 00058/0288 7 (256,448,3)
966
+ 00058/0289 7 (256,448,3)
967
+ 00058/0323 7 (256,448,3)
968
+ 00058/0328 7 (256,448,3)
969
+ 00058/0329 7 (256,448,3)
970
+ 00058/0337 7 (256,448,3)
971
+ 00058/0367 7 (256,448,3)
972
+ 00058/0383 7 (256,448,3)
973
+ 00058/0395 7 (256,448,3)
974
+ 00060/0178 7 (256,448,3)
975
+ 00060/0182 7 (256,448,3)
976
+ 00061/0001 7 (256,448,3)
977
+ 00061/0003 7 (256,448,3)
978
+ 00061/0006 7 (256,448,3)
979
+ 00061/0443 7 (256,448,3)
980
+ 00061/0586 7 (256,448,3)
981
+ 00061/0587 7 (256,448,3)
982
+ 00061/0774 7 (256,448,3)
983
+ 00061/0789 7 (256,448,3)
984
+ 00061/0815 7 (256,448,3)
985
+ 00061/0817 7 (256,448,3)
986
+ 00061/0826 7 (256,448,3)
987
+ 00061/0829 7 (256,448,3)
988
+ 00061/0830 7 (256,448,3)
989
+ 00061/0832 7 (256,448,3)
990
+ 00061/0833 7 (256,448,3)
991
+ 00061/0836 7 (256,448,3)
992
+ 00061/0837 7 (256,448,3)
993
+ 00061/0839 7 (256,448,3)
994
+ 00061/0843 7 (256,448,3)
995
+ 00061/0849 7 (256,448,3)
996
+ 00061/0859 7 (256,448,3)
997
+ 00061/0861 7 (256,448,3)
998
+ 00061/0868 7 (256,448,3)
999
+ 00061/0877 7 (256,448,3)
1000
+ 00061/0889 7 (256,448,3)
1001
+ 00061/0905 7 (256,448,3)
1002
+ 00062/0115 7 (256,448,3)
1003
+ 00062/0118 7 (256,448,3)
1004
+ 00062/0125 7 (256,448,3)
1005
+ 00062/0134 7 (256,448,3)
1006
+ 00062/0142 7 (256,448,3)
1007
+ 00062/0400 7 (256,448,3)
1008
+ 00062/0457 7 (256,448,3)
1009
+ 00062/0459 7 (256,448,3)
1010
+ 00062/0560 7 (256,448,3)
1011
+ 00062/0650 7 (256,448,3)
1012
+ 00062/0655 7 (256,448,3)
1013
+ 00062/0715 7 (256,448,3)
1014
+ 00062/0847 7 (256,448,3)
1015
+ 00062/0905 7 (256,448,3)
1016
+ 00062/0981 7 (256,448,3)
1017
+ 00063/0177 7 (256,448,3)
1018
+ 00063/0230 7 (256,448,3)
1019
+ 00063/0253 7 (256,448,3)
1020
+ 00063/0257 7 (256,448,3)
1021
+ 00063/0326 7 (256,448,3)
1022
+ 00063/0530 7 (256,448,3)
1023
+ 00063/0677 7 (256,448,3)
1024
+ 00063/0759 7 (256,448,3)
1025
+ 00063/0761 7 (256,448,3)
1026
+ 00063/0777 7 (256,448,3)
1027
+ 00063/0842 7 (256,448,3)
1028
+ 00063/0900 7 (256,448,3)
1029
+ 00064/0014 7 (256,448,3)
1030
+ 00064/0028 7 (256,448,3)
1031
+ 00064/0029 7 (256,448,3)
1032
+ 00064/0030 7 (256,448,3)
1033
+ 00064/0037 7 (256,448,3)
1034
+ 00064/0044 7 (256,448,3)
1035
+ 00064/0280 7 (256,448,3)
1036
+ 00064/0285 7 (256,448,3)
1037
+ 00064/0286 7 (256,448,3)
1038
+ 00064/0291 7 (256,448,3)
1039
+ 00064/0300 7 (256,448,3)
1040
+ 00064/0303 7 (256,448,3)
1041
+ 00064/0308 7 (256,448,3)
1042
+ 00064/0314 7 (256,448,3)
1043
+ 00064/0316 7 (256,448,3)
1044
+ 00064/0317 7 (256,448,3)
1045
+ 00064/0323 7 (256,448,3)
1046
+ 00064/0435 7 (256,448,3)
1047
+ 00064/0733 7 (256,448,3)
1048
+ 00064/0848 7 (256,448,3)
1049
+ 00064/0868 7 (256,448,3)
1050
+ 00064/0888 7 (256,448,3)
1051
+ 00064/0898 7 (256,448,3)
1052
+ 00065/0116 7 (256,448,3)
1053
+ 00065/0121 7 (256,448,3)
1054
+ 00065/0122 7 (256,448,3)
1055
+ 00065/0124 7 (256,448,3)
1056
+ 00065/0125 7 (256,448,3)
1057
+ 00065/0126 7 (256,448,3)
1058
+ 00065/0136 7 (256,448,3)
1059
+ 00065/0146 7 (256,448,3)
1060
+ 00065/0147 7 (256,448,3)
1061
+ 00065/0163 7 (256,448,3)
1062
+ 00065/0170 7 (256,448,3)
1063
+ 00065/0175 7 (256,448,3)
1064
+ 00065/0176 7 (256,448,3)
1065
+ 00065/0180 7 (256,448,3)
1066
+ 00065/0184 7 (256,448,3)
1067
+ 00065/0186 7 (256,448,3)
1068
+ 00065/0332 7 (256,448,3)
1069
+ 00065/0343 7 (256,448,3)
1070
+ 00065/0365 7 (256,448,3)
1071
+ 00065/0393 7 (256,448,3)
1072
+ 00065/0394 7 (256,448,3)
1073
+ 00065/0442 7 (256,448,3)
1074
+ 00065/0459 7 (256,448,3)
1075
+ 00065/0462 7 (256,448,3)
1076
+ 00065/0476 7 (256,448,3)
1077
+ 00065/0483 7 (256,448,3)
1078
+ 00065/0590 7 (256,448,3)
1079
+ 00065/0593 7 (256,448,3)
1080
+ 00065/0595 7 (256,448,3)
1081
+ 00065/0774 7 (256,448,3)
1082
+ 00065/0947 7 (256,448,3)
1083
+ 00065/0985 7 (256,448,3)
1084
+ 00065/0986 7 (256,448,3)
1085
+ 00066/0015 7 (256,448,3)
1086
+ 00066/0043 7 (256,448,3)
1087
+ 00066/0131 7 (256,448,3)
1088
+ 00066/0157 7 (256,448,3)
1089
+ 00066/0169 7 (256,448,3)
1090
+ 00066/0374 7 (256,448,3)
1091
+ 00066/0382 7 (256,448,3)
1092
+ 00066/0481 7 (256,448,3)
1093
+ 00066/0482 7 (256,448,3)
1094
+ 00066/0491 7 (256,448,3)
1095
+ 00066/0493 7 (256,448,3)
1096
+ 00066/0494 7 (256,448,3)
1097
+ 00066/0496 7 (256,448,3)
1098
+ 00066/0680 7 (256,448,3)
1099
+ 00066/0700 7 (256,448,3)
1100
+ 00066/0887 7 (256,448,3)
1101
+ 00066/0910 7 (256,448,3)
1102
+ 00066/0918 7 (256,448,3)
1103
+ 00067/0024 7 (256,448,3)
1104
+ 00067/0059 7 (256,448,3)
1105
+ 00067/0408 7 (256,448,3)
1106
+ 00067/0414 7 (256,448,3)
1107
+ 00067/0417 7 (256,448,3)
1108
+ 00067/0419 7 (256,448,3)
1109
+ 00067/0423 7 (256,448,3)
1110
+ 00067/0441 7 (256,448,3)
1111
+ 00067/0467 7 (256,448,3)
1112
+ 00067/0471 7 (256,448,3)
1113
+ 00067/0487 7 (256,448,3)
1114
+ 00067/0494 7 (256,448,3)
1115
+ 00067/0497 7 (256,448,3)
1116
+ 00067/0513 7 (256,448,3)
1117
+ 00067/0521 7 (256,448,3)
1118
+ 00068/0111 7 (256,448,3)
1119
+ 00068/0123 7 (256,448,3)
1120
+ 00068/0126 7 (256,448,3)
1121
+ 00068/0129 7 (256,448,3)
1122
+ 00068/0270 7 (256,448,3)
1123
+ 00068/0330 7 (256,448,3)
1124
+ 00068/0407 7 (256,448,3)
1125
+ 00068/0428 7 (256,448,3)
1126
+ 00068/0544 7 (256,448,3)
1127
+ 00068/0635 7 (256,448,3)
1128
+ 00068/0637 7 (256,448,3)
1129
+ 00068/0736 7 (256,448,3)
1130
+ 00068/0738 7 (256,448,3)
1131
+ 00068/0747 7 (256,448,3)
1132
+ 00068/0748 7 (256,448,3)
1133
+ 00068/0749 7 (256,448,3)
1134
+ 00068/0762 7 (256,448,3)
1135
+ 00068/0815 7 (256,448,3)
1136
+ 00068/0981 7 (256,448,3)
1137
+ 00068/0982 7 (256,448,3)
1138
+ 00069/0187 7 (256,448,3)
1139
+ 00069/0191 7 (256,448,3)
1140
+ 00070/0001 7 (256,448,3)
1141
+ 00070/0003 7 (256,448,3)
1142
+ 00070/0340 7 (256,448,3)
1143
+ 00070/0341 7 (256,448,3)
1144
+ 00070/0342 7 (256,448,3)
1145
+ 00070/0347 7 (256,448,3)
1146
+ 00070/0372 7 (256,448,3)
1147
+ 00070/0383 7 (256,448,3)
1148
+ 00070/0389 7 (256,448,3)
1149
+ 00070/0728 7 (256,448,3)
1150
+ 00070/0813 7 (256,448,3)
1151
+ 00070/0814 7 (256,448,3)
1152
+ 00070/0823 7 (256,448,3)
1153
+ 00070/0840 7 (256,448,3)
1154
+ 00070/0843 7 (256,448,3)
1155
+ 00070/0861 7 (256,448,3)
1156
+ 00071/0111 7 (256,448,3)
1157
+ 00071/0138 7 (256,448,3)
1158
+ 00071/0143 7 (256,448,3)
1159
+ 00071/0150 7 (256,448,3)
1160
+ 00071/0508 7 (256,448,3)
1161
+ 00071/0514 7 (256,448,3)
1162
+ 00071/0550 7 (256,448,3)
1163
+ 00071/0556 7 (256,448,3)
1164
+ 00071/0600 7 (256,448,3)
1165
+ 00071/0665 7 (256,448,3)
1166
+ 00071/0670 7 (256,448,3)
1167
+ 00071/0672 7 (256,448,3)
1168
+ 00071/0673 7 (256,448,3)
1169
+ 00071/0705 7 (256,448,3)
1170
+ 00071/0706 7 (256,448,3)
1171
+ 00071/0707 7 (256,448,3)
1172
+ 00071/0774 7 (256,448,3)
1173
+ 00071/0799 7 (256,448,3)
1174
+ 00071/0814 7 (256,448,3)
1175
+ 00071/0816 7 (256,448,3)
1176
+ 00071/0819 7 (256,448,3)
1177
+ 00071/0823 7 (256,448,3)
1178
+ 00071/0828 7 (256,448,3)
1179
+ 00071/0830 7 (256,448,3)
1180
+ 00071/0839 7 (256,448,3)
1181
+ 00071/0841 7 (256,448,3)
1182
+ 00072/0192 7 (256,448,3)
1183
+ 00072/0194 7 (256,448,3)
1184
+ 00072/0197 7 (256,448,3)
1185
+ 00072/0199 7 (256,448,3)
1186
+ 00072/0285 7 (256,448,3)
1187
+ 00072/0586 7 (256,448,3)
1188
+ 00072/0795 7 (256,448,3)
1189
+ 00072/0811 7 (256,448,3)
1190
+ 00072/0812 7 (256,448,3)
1191
+ 00072/0824 7 (256,448,3)
1192
+ 00072/0831 7 (256,448,3)
1193
+ 00072/0835 7 (256,448,3)
1194
+ 00072/0837 7 (256,448,3)
1195
+ 00072/0841 7 (256,448,3)
1196
+ 00072/0962 7 (256,448,3)
1197
+ 00073/0296 7 (256,448,3)
1198
+ 00073/0299 7 (256,448,3)
1199
+ 00073/0300 7 (256,448,3)
1200
+ 00073/0301 7 (256,448,3)
1201
+ 00073/0427 7 (256,448,3)
1202
+ 00073/0428 7 (256,448,3)
1203
+ 00073/0494 7 (256,448,3)
1204
+ 00073/0615 7 (256,448,3)
1205
+ 00073/0620 7 (256,448,3)
1206
+ 00073/0624 7 (256,448,3)
1207
+ 00073/0979 7 (256,448,3)
1208
+ 00074/0226 7 (256,448,3)
1209
+ 00074/0250 7 (256,448,3)
1210
+ 00074/0284 7 (256,448,3)
1211
+ 00074/0503 7 (256,448,3)
1212
+ 00074/0614 7 (256,448,3)
1213
+ 00074/0629 7 (256,448,3)
1214
+ 00074/0762 7 (256,448,3)
1215
+ 00074/0765 7 (256,448,3)
1216
+ 00074/0900 7 (256,448,3)
1217
+ 00074/0908 7 (256,448,3)
1218
+ 00075/0352 7 (256,448,3)
1219
+ 00075/0360 7 (256,448,3)
1220
+ 00075/0361 7 (256,448,3)
1221
+ 00075/0365 7 (256,448,3)
1222
+ 00075/0383 7 (256,448,3)
1223
+ 00075/0384 7 (256,448,3)
1224
+ 00075/0386 7 (256,448,3)
1225
+ 00075/0407 7 (256,448,3)
1226
+ 00075/0410 7 (256,448,3)
1227
+ 00075/0412 7 (256,448,3)
1228
+ 00075/0413 7 (256,448,3)
1229
+ 00075/0459 7 (256,448,3)
1230
+ 00075/0504 7 (256,448,3)
1231
+ 00075/0515 7 (256,448,3)
1232
+ 00075/0518 7 (256,448,3)
1233
+ 00075/0567 7 (256,448,3)
1234
+ 00075/0681 7 (256,448,3)
1235
+ 00075/0693 7 (256,448,3)
1236
+ 00075/0728 7 (256,448,3)
1237
+ 00075/0731 7 (256,448,3)
1238
+ 00075/0804 7 (256,448,3)
1239
+ 00075/0974 7 (256,448,3)
1240
+ 00075/0975 7 (256,448,3)
1241
+ 00075/0983 7 (256,448,3)
1242
+ 00075/0997 7 (256,448,3)
1243
+ 00076/0006 7 (256,448,3)
1244
+ 00076/0007 7 (256,448,3)
1245
+ 00076/0011 7 (256,448,3)
1246
+ 00076/0013 7 (256,448,3)
1247
+ 00076/0014 7 (256,448,3)
1248
+ 00076/0027 7 (256,448,3)
1249
+ 00076/0029 7 (256,448,3)
1250
+ 00076/0037 7 (256,448,3)
1251
+ 00076/0041 7 (256,448,3)
1252
+ 00076/0055 7 (256,448,3)
1253
+ 00076/0071 7 (256,448,3)
1254
+ 00076/0172 7 (256,448,3)
1255
+ 00076/0275 7 (256,448,3)
1256
+ 00076/0286 7 (256,448,3)
1257
+ 00076/0467 7 (256,448,3)
1258
+ 00076/0481 7 (256,448,3)
1259
+ 00076/0527 7 (256,448,3)
1260
+ 00076/0895 7 (256,448,3)
1261
+ 00076/0896 7 (256,448,3)
1262
+ 00076/0906 7 (256,448,3)
1263
+ 00076/0924 7 (256,448,3)
1264
+ 00076/0964 7 (256,448,3)
1265
+ 00076/0984 7 (256,448,3)
1266
+ 00077/0317 7 (256,448,3)
1267
+ 00077/0322 7 (256,448,3)
1268
+ 00077/0333 7 (256,448,3)
1269
+ 00077/0334 7 (256,448,3)
1270
+ 00077/0480 7 (256,448,3)
1271
+ 00077/0488 7 (256,448,3)
1272
+ 00077/0490 7 (256,448,3)
1273
+ 00077/0582 7 (256,448,3)
1274
+ 00077/0586 7 (256,448,3)
1275
+ 00077/0969 7 (256,448,3)
1276
+ 00078/0007 7 (256,448,3)
1277
+ 00078/0011 7 (256,448,3)
1278
+ 00078/0153 7 (256,448,3)
1279
+ 00078/0289 7 (256,448,3)
1280
+ 00078/0312 7 (256,448,3)
1281
+ 00078/0492 7 (256,448,3)
1282
+ 00078/0580 7 (256,448,3)
1283
+ 00078/0595 7 (256,448,3)
1284
+ 00078/0814 7 (256,448,3)
1285
+ 00078/0950 7 (256,448,3)
1286
+ 00078/0955 7 (256,448,3)
1287
+ 00079/0060 7 (256,448,3)
1288
+ 00079/0067 7 (256,448,3)
1289
+ 00080/0216 7 (256,448,3)
1290
+ 00080/0308 7 (256,448,3)
1291
+ 00080/0504 7 (256,448,3)
1292
+ 00080/0552 7 (256,448,3)
1293
+ 00080/0576 7 (256,448,3)
1294
+ 00080/0583 7 (256,448,3)
1295
+ 00080/0837 7 (256,448,3)
1296
+ 00080/0839 7 (256,448,3)
1297
+ 00080/0871 7 (256,448,3)
1298
+ 00080/0877 7 (256,448,3)
1299
+ 00080/0880 7 (256,448,3)
1300
+ 00080/0969 7 (256,448,3)
1301
+ 00080/0973 7 (256,448,3)
1302
+ 00080/0980 7 (256,448,3)
1303
+ 00081/0202 7 (256,448,3)
1304
+ 00081/0203 7 (256,448,3)
1305
+ 00081/0210 7 (256,448,3)
1306
+ 00081/0268 7 (256,448,3)
1307
+ 00081/0281 7 (256,448,3)
1308
+ 00081/0283 7 (256,448,3)
1309
+ 00081/0317 7 (256,448,3)
1310
+ 00081/0327 7 (256,448,3)
1311
+ 00082/0018 7 (256,448,3)
1312
+ 00082/0025 7 (256,448,3)
1313
+ 00082/0089 7 (256,448,3)
1314
+ 00082/0140 7 (256,448,3)
1315
+ 00082/0442 7 (256,448,3)
1316
+ 00082/0465 7 (256,448,3)
1317
+ 00082/0473 7 (256,448,3)
1318
+ 00082/0481 7 (256,448,3)
1319
+ 00082/0492 7 (256,448,3)
1320
+ 00082/0495 7 (256,448,3)
1321
+ 00082/0497 7 (256,448,3)
1322
+ 00082/0502 7 (256,448,3)
1323
+ 00082/0504 7 (256,448,3)
1324
+ 00082/0506 7 (256,448,3)
1325
+ 00082/0507 7 (256,448,3)
1326
+ 00082/0510 7 (256,448,3)
1327
+ 00082/0519 7 (256,448,3)
1328
+ 00082/0523 7 (256,448,3)
1329
+ 00082/0588 7 (256,448,3)
1330
+ 00082/0597 7 (256,448,3)
1331
+ 00082/0632 7 (256,448,3)
1332
+ 00082/0751 7 (256,448,3)
1333
+ 00082/0767 7 (256,448,3)
1334
+ 00082/0771 7 (256,448,3)
1335
+ 00082/0790 7 (256,448,3)
1336
+ 00082/0804 7 (256,448,3)
1337
+ 00082/0823 7 (256,448,3)
1338
+ 00083/0052 7 (256,448,3)
1339
+ 00083/0056 7 (256,448,3)
1340
+ 00083/0113 7 (256,448,3)
1341
+ 00083/0114 7 (256,448,3)
1342
+ 00083/0122 7 (256,448,3)
1343
+ 00083/0137 7 (256,448,3)
1344
+ 00083/0270 7 (256,448,3)
1345
+ 00083/0295 7 (256,448,3)
1346
+ 00083/0303 7 (256,448,3)
1347
+ 00083/0308 7 (256,448,3)
1348
+ 00083/0586 7 (256,448,3)
1349
+ 00083/0592 7 (256,448,3)
1350
+ 00083/0640 7 (256,448,3)
1351
+ 00083/0648 7 (256,448,3)
1352
+ 00083/0654 7 (256,448,3)
1353
+ 00083/0662 7 (256,448,3)
1354
+ 00083/0666 7 (256,448,3)
1355
+ 00083/0668 7 (256,448,3)
1356
+ 00083/0669 7 (256,448,3)
1357
+ 00083/0675 7 (256,448,3)
1358
+ 00083/0679 7 (256,448,3)
1359
+ 00083/0681 7 (256,448,3)
1360
+ 00083/0682 7 (256,448,3)
1361
+ 00083/0694 7 (256,448,3)
1362
+ 00083/0695 7 (256,448,3)
1363
+ 00083/0697 7 (256,448,3)
1364
+ 00083/0704 7 (256,448,3)
1365
+ 00083/0713 7 (256,448,3)
1366
+ 00083/0721 7 (256,448,3)
1367
+ 00083/0855 7 (256,448,3)
1368
+ 00084/0109 7 (256,448,3)
1369
+ 00084/0113 7 (256,448,3)
1370
+ 00084/0306 7 (256,448,3)
1371
+ 00084/0442 7 (256,448,3)
1372
+ 00084/0669 7 (256,448,3)
1373
+ 00084/0679 7 (256,448,3)
1374
+ 00084/0685 7 (256,448,3)
1375
+ 00084/0691 7 (256,448,3)
1376
+ 00084/0768 7 (256,448,3)
1377
+ 00084/0817 7 (256,448,3)
1378
+ 00085/0027 7 (256,448,3)
1379
+ 00085/0035 7 (256,448,3)
1380
+ 00085/0038 7 (256,448,3)
1381
+ 00085/0223 7 (256,448,3)
1382
+ 00085/0233 7 (256,448,3)
1383
+ 00085/0281 7 (256,448,3)
1384
+ 00085/0287 7 (256,448,3)
1385
+ 00085/0313 7 (256,448,3)
1386
+ 00085/0521 7 (256,448,3)
1387
+ 00085/0848 7 (256,448,3)
1388
+ 00085/0855 7 (256,448,3)
1389
+ 00085/0865 7 (256,448,3)
1390
+ 00085/0952 7 (256,448,3)
1391
+ 00085/0964 7 (256,448,3)
1392
+ 00085/0973 7 (256,448,3)
1393
+ 00085/0986 7 (256,448,3)
1394
+ 00085/0993 7 (256,448,3)
1395
+ 00086/0070 7 (256,448,3)
1396
+ 00086/0075 7 (256,448,3)
1397
+ 00086/0094 7 (256,448,3)
1398
+ 00086/0103 7 (256,448,3)
1399
+ 00086/0112 7 (256,448,3)
1400
+ 00086/0288 7 (256,448,3)
1401
+ 00086/0576 7 (256,448,3)
1402
+ 00086/0580 7 (256,448,3)
1403
+ 00086/0584 7 (256,448,3)
1404
+ 00086/0599 7 (256,448,3)
1405
+ 00086/0600 7 (256,448,3)
1406
+ 00086/0602 7 (256,448,3)
1407
+ 00086/0612 7 (256,448,3)
1408
+ 00086/0629 7 (256,448,3)
1409
+ 00086/0655 7 (256,448,3)
1410
+ 00086/0679 7 (256,448,3)
1411
+ 00086/0694 7 (256,448,3)
1412
+ 00086/0695 7 (256,448,3)
1413
+ 00086/0701 7 (256,448,3)
1414
+ 00086/0760 7 (256,448,3)
1415
+ 00086/0786 7 (256,448,3)
1416
+ 00086/0845 7 (256,448,3)
1417
+ 00086/0868 7 (256,448,3)
1418
+ 00086/0889 7 (256,448,3)
1419
+ 00086/0891 7 (256,448,3)
1420
+ 00086/0927 7 (256,448,3)
1421
+ 00086/0938 7 (256,448,3)
1422
+ 00086/0946 7 (256,448,3)
1423
+ 00086/0963 7 (256,448,3)
1424
+ 00086/0969 7 (256,448,3)
1425
+ 00087/0023 7 (256,448,3)
1426
+ 00087/0029 7 (256,448,3)
1427
+ 00087/0144 7 (256,448,3)
1428
+ 00087/0148 7 (256,448,3)
1429
+ 00087/0159 7 (256,448,3)
1430
+ 00087/0174 7 (256,448,3)
1431
+ 00087/0283 7 (256,448,3)
1432
+ 00087/0284 7 (256,448,3)
1433
+ 00087/0294 7 (256,448,3)
1434
+ 00087/0296 7 (256,448,3)
1435
+ 00087/0498 7 (256,448,3)
1436
+ 00087/0502 7 (256,448,3)
1437
+ 00087/0532 7 (256,448,3)
1438
+ 00087/0557 7 (256,448,3)
1439
+ 00087/0559 7 (256,448,3)
1440
+ 00087/0574 7 (256,448,3)
1441
+ 00087/0577 7 (256,448,3)
1442
+ 00088/0006 7 (256,448,3)
1443
+ 00088/0268 7 (256,448,3)
1444
+ 00088/0320 7 (256,448,3)
1445
+ 00088/0412 7 (256,448,3)
1446
+ 00088/0431 7 (256,448,3)
1447
+ 00088/0432 7 (256,448,3)
1448
+ 00088/0465 7 (256,448,3)
1449
+ 00088/0507 7 (256,448,3)
1450
+ 00088/0565 7 (256,448,3)
1451
+ 00088/0629 7 (256,448,3)
1452
+ 00088/0831 7 (256,448,3)
1453
+ 00088/0836 7 (256,448,3)
1454
+ 00088/0972 7 (256,448,3)
1455
+ 00088/0974 7 (256,448,3)
1456
+ 00088/0980 7 (256,448,3)
1457
+ 00089/0067 7 (256,448,3)
1458
+ 00089/0244 7 (256,448,3)
1459
+ 00089/0404 7 (256,448,3)
1460
+ 00089/0416 7 (256,448,3)
1461
+ 00089/0419 7 (256,448,3)
1462
+ 00089/0428 7 (256,448,3)
1463
+ 00089/0712 7 (256,448,3)
1464
+ 00089/0713 7 (256,448,3)
1465
+ 00089/0723 7 (256,448,3)
1466
+ 00089/0727 7 (256,448,3)
1467
+ 00089/0770 7 (256,448,3)
1468
+ 00089/0809 7 (256,448,3)
1469
+ 00089/0811 7 (256,448,3)
1470
+ 00089/0888 7 (256,448,3)
1471
+ 00089/0898 7 (256,448,3)
1472
+ 00089/0903 7 (256,448,3)
1473
+ 00089/0907 7 (256,448,3)
1474
+ 00089/0911 7 (256,448,3)
1475
+ 00089/0915 7 (256,448,3)
1476
+ 00089/0926 7 (256,448,3)
1477
+ 00089/0955 7 (256,448,3)
1478
+ 00090/0027 7 (256,448,3)
1479
+ 00090/0028 7 (256,448,3)
1480
+ 00090/0032 7 (256,448,3)
1481
+ 00090/0038 7 (256,448,3)
1482
+ 00090/0076 7 (256,448,3)
1483
+ 00090/0081 7 (256,448,3)
1484
+ 00090/0086 7 (256,448,3)
1485
+ 00090/0119 7 (256,448,3)
1486
+ 00090/0258 7 (256,448,3)
1487
+ 00090/0261 7 (256,448,3)
1488
+ 00090/0447 7 (256,448,3)
1489
+ 00090/0498 7 (256,448,3)
1490
+ 00090/0514 7 (256,448,3)
1491
+ 00090/0523 7 (256,448,3)
1492
+ 00090/0530 7 (256,448,3)
1493
+ 00090/0540 7 (256,448,3)
1494
+ 00090/0548 7 (256,448,3)
1495
+ 00090/0565 7 (256,448,3)
1496
+ 00090/0578 7 (256,448,3)
1497
+ 00090/0580 7 (256,448,3)
1498
+ 00090/0581 7 (256,448,3)
1499
+ 00090/0780 7 (256,448,3)
1500
+ 00090/0940 7 (256,448,3)
1501
+ 00090/0984 7 (256,448,3)
1502
+ 00091/0023 7 (256,448,3)
1503
+ 00091/0051 7 (256,448,3)
1504
+ 00091/0317 7 (256,448,3)
1505
+ 00091/0320 7 (256,448,3)
1506
+ 00091/0582 7 (256,448,3)
1507
+ 00091/0585 7 (256,448,3)
1508
+ 00091/0588 7 (256,448,3)
1509
+ 00091/0601 7 (256,448,3)
1510
+ 00091/0602 7 (256,448,3)
1511
+ 00091/0603 7 (256,448,3)
1512
+ 00091/0634 7 (256,448,3)
1513
+ 00091/0693 7 (256,448,3)
1514
+ 00091/0741 7 (256,448,3)
1515
+ 00091/0966 7 (256,448,3)
1516
+ 00091/0973 7 (256,448,3)
1517
+ 00091/0985 7 (256,448,3)
1518
+ 00092/0007 7 (256,448,3)
1519
+ 00092/0132 7 (256,448,3)
1520
+ 00092/0270 7 (256,448,3)
1521
+ 00092/0296 7 (256,448,3)
1522
+ 00092/0611 7 (256,448,3)
1523
+ 00092/0625 7 (256,448,3)
1524
+ 00092/0627 7 (256,448,3)
1525
+ 00092/0651 7 (256,448,3)
1526
+ 00092/0652 7 (256,448,3)
1527
+ 00092/0910 7 (256,448,3)
1528
+ 00093/0075 7 (256,448,3)
1529
+ 00093/0078 7 (256,448,3)
1530
+ 00093/0100 7 (256,448,3)
1531
+ 00093/0132 7 (256,448,3)
1532
+ 00093/0133 7 (256,448,3)
1533
+ 00093/0176 7 (256,448,3)
1534
+ 00093/0177 7 (256,448,3)
1535
+ 00093/0178 7 (256,448,3)
1536
+ 00093/0181 7 (256,448,3)
1537
+ 00093/0183 7 (256,448,3)
1538
+ 00093/0184 7 (256,448,3)
1539
+ 00093/0286 7 (256,448,3)
1540
+ 00093/0304 7 (256,448,3)
1541
+ 00093/0305 7 (256,448,3)
1542
+ 00093/0319 7 (256,448,3)
1543
+ 00093/0324 7 (256,448,3)
1544
+ 00093/0325 7 (256,448,3)
1545
+ 00093/0327 7 (256,448,3)
1546
+ 00093/0331 7 (256,448,3)
1547
+ 00093/0444 7 (256,448,3)
1548
+ 00093/0450 7 (256,448,3)
1549
+ 00093/0593 7 (256,448,3)
1550
+ 00094/0032 7 (256,448,3)
1551
+ 00094/0057 7 (256,448,3)
1552
+ 00094/0139 7 (256,448,3)
1553
+ 00094/0206 7 (256,448,3)
1554
+ 00094/0211 7 (256,448,3)
1555
+ 00094/0215 7 (256,448,3)
1556
+ 00094/0218 7 (256,448,3)
1557
+ 00094/0257 7 (256,448,3)
1558
+ 00094/0329 7 (256,448,3)
1559
+ 00094/0331 7 (256,448,3)
1560
+ 00094/0332 7 (256,448,3)
1561
+ 00094/0369 7 (256,448,3)
1562
+ 00094/0370 7 (256,448,3)
1563
+ 00094/0383 7 (256,448,3)
1564
+ 00094/0385 7 (256,448,3)
1565
+ 00094/0387 7 (256,448,3)
1566
+ 00094/0399 7 (256,448,3)
1567
+ 00094/0605 7 (256,448,3)
1568
+ 00094/0648 7 (256,448,3)
1569
+ 00094/0649 7 (256,448,3)
1570
+ 00094/0759 7 (256,448,3)
1571
+ 00094/0800 7 (256,448,3)
1572
+ 00094/0894 7 (256,448,3)
1573
+ 00094/0896 7 (256,448,3)
1574
+ 00095/0089 7 (256,448,3)
1575
+ 00095/0108 7 (256,448,3)
1576
+ 00095/0109 7 (256,448,3)
1577
+ 00095/0114 7 (256,448,3)
1578
+ 00095/0128 7 (256,448,3)
1579
+ 00095/0133 7 (256,448,3)
1580
+ 00095/0150 7 (256,448,3)
1581
+ 00095/0153 7 (256,448,3)
1582
+ 00095/0154 7 (256,448,3)
1583
+ 00095/0196 7 (256,448,3)
1584
+ 00095/0209 7 (256,448,3)
1585
+ 00095/0228 7 (256,448,3)
1586
+ 00095/0230 7 (256,448,3)
1587
+ 00095/0231 7 (256,448,3)
1588
+ 00095/0242 7 (256,448,3)
1589
+ 00095/0243 7 (256,448,3)
1590
+ 00095/0253 7 (256,448,3)
1591
+ 00095/0280 7 (256,448,3)
1592
+ 00095/0281 7 (256,448,3)
1593
+ 00095/0283 7 (256,448,3)
1594
+ 00095/0314 7 (256,448,3)
1595
+ 00095/0868 7 (256,448,3)
1596
+ 00095/0894 7 (256,448,3)
1597
+ 00096/0062 7 (256,448,3)
1598
+ 00096/0347 7 (256,448,3)
1599
+ 00096/0348 7 (256,448,3)
1600
+ 00096/0359 7 (256,448,3)
1601
+ 00096/0363 7 (256,448,3)
1602
+ 00096/0373 7 (256,448,3)
1603
+ 00096/0378 7 (256,448,3)
1604
+ 00096/0387 7 (256,448,3)
1605
+ 00096/0395 7 (256,448,3)
1606
+ 00096/0396 7 (256,448,3)
1607
+ 00096/0404 7 (256,448,3)
1608
+ 00096/0653 7 (256,448,3)
1609
+ 00096/0668 7 (256,448,3)
1610
+ 00096/0679 7 (256,448,3)
1611
+ 00096/0729 7 (256,448,3)
1612
+ 00096/0736 7 (256,448,3)
1613
+ 00096/0823 7 (256,448,3)
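Each row of these Vimeo90K meta-info files encodes a clip/sequence folder, the number of frames in it, and the frame shape, e.g. "00096/0823 7 (256,448,3)". A minimal parsing sketch (the function name is ours, not part of the repo):

# Parse one BasicSR-style meta-info row: "<clip>/<seq> <num_frames> (h,w,c)"
def parse_meta_info_line(line):
    folder, frame_num, shape = line.split()
    h, w, c = (int(s) for s in shape.strip('()').split(','))
    return folder, int(frame_num), (h, w, c)

print(parse_meta_info_line('00096/0823 7 (256,448,3)'))
# -> ('00096/0823', 7, (256, 448, 3))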
NAFNet/basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt ADDED
The diff for this file is too large to render. See raw diff
 
NAFNet/basicsr/data/paired_image_SR_LR_FullImage_Memory_dataset.py ADDED
@@ -0,0 +1,296 @@
# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from BasicSR (https://github.com/xinntao/BasicSR)
# Copyright 2018-2020 BasicSR Authors
# ------------------------------------------------------------------------
import pickle

import numpy as np
from torch.utils import data as data
from torchvision.transforms.functional import normalize

from basicsr.data.transforms import augment, paired_random_crop_hw
from basicsr.utils import img2tensor


class PairedImageSRLRFullImageMemoryDataset(data.Dataset):
    """Paired image dataset for image restoration, held fully in memory.

    Reads LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc.)
    and GT image pairs from two pickle files, each holding a list of uint8
    HWC arrays with 6 channels (stereo left view + right view).

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Path to the pickled list of GT images.
            dataroot_lq (str): Path to the pickled list of LQ images.
            gt_size (int): Cropped patch size for gt patches (or use
                gt_size_h / gt_size_w for non-square patches).
            use_hflip (bool): Use horizontal flips.
            use_vflip (bool): Use vertical flips.
            use_rot (bool): Use rotation (use vertical flip and transposing
                h and w for implementation).

            scale (int): Scale, which will be added automatically.
            phase (str): 'train' or 'val'.
    """

    def __init__(self, opt):
        super(PairedImageSRLRFullImageMemoryDataset, self).__init__()
        self.opt = opt
        self.mean = opt['mean'] if 'mean' in opt else None
        self.std = opt['std'] if 'std' in opt else None

        # The pickles are loaded lazily in __getitem__, so that each
        # dataloader worker loads its own copy after forking.
        self.gts = None
        self.lqs = None

        self.dataroot_gt = opt['dataroot_gt']
        self.dataroot_lq = opt['dataroot_lq']

    def __getitem__(self, index):
        if self.lqs is None:
            with open(self.dataroot_lq, 'rb') as f:
                self.lqs = pickle.load(f)
        if self.gts is None:
            with open(self.dataroot_gt, 'rb') as f:
                self.gts = pickle.load(f)

        index = index % len(self.lqs)

        scale = self.opt['scale']

        # Dimension order: HWC (6 channels: left view then right view);
        # image range: [0, 1], float32.
        img_lq = self.lqs[index].copy().astype(np.float32) / 255.
        img_gt = self.gts[index].copy().astype(np.float32) / 255.

        # augmentation for training
        rot90 = False

        if self.opt['phase'] == 'train':
            if 'gt_size_h' in self.opt and 'gt_size_w' in self.opt:
                gt_size_h = int(self.opt['gt_size_h'])
                gt_size_w = int(self.opt['gt_size_w'])
            else:
                gt_size = int(self.opt['gt_size'])
                gt_size_h, gt_size_w = gt_size, gt_size

            # randomly swap the left and right views
            if 'flip_LR' in self.opt and self.opt['flip_LR']:
                if np.random.rand() < 0.5:
                    img_gt = img_gt[:, :, [3, 4, 5, 0, 1, 2]]
                    img_lq = img_lq[:, :, [3, 4, 5, 0, 1, 2]]

            # randomly permute the RGB channels; the same permutation is
            # applied to both views
            if 'flip_RGB' in self.opt and self.opt['flip_RGB']:
                idx = [
                    [0, 1, 2, 3, 4, 5],
                    [0, 2, 1, 3, 5, 4],
                    [1, 0, 2, 4, 3, 5],
                    [1, 2, 0, 4, 5, 3],
                    [2, 0, 1, 5, 3, 4],
                    [2, 1, 0, 5, 4, 3],
                ][int(np.random.rand() * 6)]

                img_gt = img_gt[:, :, idx]
                img_lq = img_lq[:, :, idx]

            # randomly invert individual RGB channels (in both views)
            if 'inverse_RGB' in self.opt and self.opt['inverse_RGB']:
                for i in range(3):
                    if np.random.rand() < 0.5:
                        img_gt[:, :, i] = 1 - img_gt[:, :, i]
                        img_gt[:, :, i + 3] = 1 - img_gt[:, :, i + 3]
                        img_lq[:, :, i] = 1 - img_lq[:, :, i]
                        img_lq[:, :, i + 3] = 1 - img_lq[:, :, i + 3]

            # randomly invert all channels at once
            if 'naive_inverse_RGB' in self.opt and self.opt['naive_inverse_RGB']:
                if np.random.rand() < 0.5:
                    img_gt = 1 - img_gt
                    img_lq = 1 - img_lq

            # randomly shift the two views horizontally against each other
            if 'random_offset' in self.opt and self.opt['random_offset'] > 0:
                S = int(self.opt['random_offset'])

                offsets = int(np.random.rand() * (S + 1))  # 0 ~ S
                s2, s4 = 0, 0

                if np.random.rand() < 0.5:
                    s2 = offsets
                else:
                    s4 = offsets

                _, w, _ = img_lq.shape

                img_lq = np.concatenate(
                    [img_lq[:, s2:w - s4, :3], img_lq[:, s4:w - s2, 3:]],
                    axis=-1)
                img_gt = np.concatenate(
                    [img_gt[:, 4 * s2:4 * w - 4 * s4, :3],
                     img_gt[:, 4 * s4:4 * w - 4 * s2, 3:]],
                    axis=-1)

            # random crop
            img_gt, img_lq = img_gt.copy(), img_lq.copy()
            img_gt, img_lq = paired_random_crop_hw(img_gt, img_lq, gt_size_h,
                                                   gt_size_w, scale,
                                                   'gt_path_L_and_R')
            # flip, rotation
            imgs, status = augment([img_gt, img_lq], self.opt['use_hflip'],
                                   self.opt['use_rot'],
                                   vflip=self.opt['use_vflip'],
                                   return_status=True)

            img_gt, img_lq = imgs
            hflip, vflip, rot90 = status

        # TODO: color space transform
        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt, img_lq = img2tensor([img_gt, img_lq],
                                    bgr2rgb=True,
                                    float32=True)
        # normalize
        if self.mean is not None or self.std is not None:
            normalize(img_lq, self.mean, self.std, inplace=True)
            normalize(img_gt, self.mean, self.std, inplace=True)

        return {
            'lq': img_lq,
            'gt': img_gt,
            # placeholder paths: samples come from pickles, not image files
            'lq_path': 'lq path ',
            'gt_path': 'gt path ',
            'is_rot': 1. if rot90 else 0.
        }

    def __len__(self):
        # A fixed, very large epoch length; samples are indexed with
        # index % len(self.lqs), so the dataset is effectively repeated.
        return 3200005
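For orientation, a minimal usage sketch of the in-memory dataset above. It assumes each pickle holds a plain list of uint8 HWC arrays with 6 channels (stereo left + right view); the /tmp paths and patch sizes are illustrative, and the LQ patch is assumed to be gt_size // scale as cropped by paired_random_crop_hw.

# Build tiny stand-in pickles so the sketch is self-contained
# (hypothetical paths; real training data would be far larger).
import pickle
import numpy as np

lqs = [np.random.randint(0, 256, (30, 90, 6), dtype=np.uint8)]
gts = [np.random.randint(0, 256, (120, 360, 6), dtype=np.uint8)]  # 4x the LQ size
with open('/tmp/lq.pkl', 'wb') as f:
    pickle.dump(lqs, f)
with open('/tmp/gt.pkl', 'wb') as f:
    pickle.dump(gts, f)

opt = {
    'dataroot_lq': '/tmp/lq.pkl',
    'dataroot_gt': '/tmp/gt.pkl',
    'scale': 4,
    'phase': 'train',
    'gt_size': 96,        # GT patch size; LQ patch is gt_size // scale
    'use_hflip': True,
    'use_vflip': True,
    'use_rot': False,
}
dataset = PairedImageSRLRFullImageMemoryDataset(opt)
sample = dataset[0]
# CHW tensors: lq is (6, 24, 24), gt is (6, 96, 96)
print(sample['lq'].shape, sample['gt'].shape)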
NAFNet/basicsr/data/paired_image_SR_LR_dataset.py ADDED
@@ -0,0 +1,301 @@
# ------------------------------------------------------------------------
# Copyright (c) 2022 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from BasicSR (https://github.com/xinntao/BasicSR)
# Copyright 2018-2020 BasicSR Authors
# ------------------------------------------------------------------------
import os

import numpy as np
from torch.utils import data as data
from torchvision.transforms.functional import normalize

from basicsr.data.data_util import (paired_paths_from_lmdb,
                                    paired_paths_from_meta_info_file)
from basicsr.data.transforms import (augment, paired_random_crop,
                                     paired_random_crop_hw)
from basicsr.utils import FileClient, imfrombytes, img2tensor, padding


class PairedImageSRLRDataset(data.Dataset):
    """Paired image dataset for image restoration.

    Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc.) and
    GT image pairs.

    There are three modes:
    1. 'lmdb': Use lmdb files.
        If opt['io_backend'] == lmdb.
    2. 'meta_info_file': Use meta information file to generate paths.
        If opt['io_backend'] != lmdb and opt['meta_info_file'] is not None.
    3. 'folder': Scan folders to generate paths.
        The rest.

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            meta_info_file (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwargs.
            filename_tmpl (str): Template for each filename. Note that the
                template excludes the file extension. Default: '{}'.
            gt_size (int): Cropped patch size for gt patches.
            use_flip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing
                h and w for implementation).

            scale (bool): Scale, which will be added automatically.
            phase (str): 'train' or 'val'.
    """

    def __init__(self, opt):
        super(PairedImageSRLRDataset, self).__init__()
        self.opt = opt
        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.mean = opt['mean'] if 'mean' in opt else None
        self.std = opt['std'] if 'std' in opt else None

        self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq']
        if 'filename_tmpl' in opt:
            self.filename_tmpl = opt['filename_tmpl']
        else:
            self.filename_tmpl = '{}'

        if self.io_backend_opt['type'] == 'lmdb':
            self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder]
            self.io_backend_opt['client_keys'] = ['lq', 'gt']
            self.paths = paired_paths_from_lmdb(
                [self.lq_folder, self.gt_folder], ['lq', 'gt'])
        elif 'meta_info_file' in self.opt and self.opt[
                'meta_info_file'] is not None:
            self.paths = paired_paths_from_meta_info_file(
                [self.lq_folder, self.gt_folder], ['lq', 'gt'],
                self.opt['meta_info_file'], self.filename_tmpl)
        else:
            # Image pairs follow the naming scheme {:04}_L.png / {:04}_R.png,
            # so only the number of files is needed, not a path list.
            nums_lq = len(os.listdir(self.lq_folder))
            nums_gt = len(os.listdir(self.gt_folder))
            assert nums_gt == nums_lq

            self.nums = nums_lq

    def __getitem__(self, index):
        if self.file_client is None:
            self.file_client = FileClient(
                self.io_backend_opt.pop('type'), **self.io_backend_opt)

        scale = self.opt['scale']

        # Load gt and lq images. Dimension order: HWC; channel order: BGR;
        # image range: [0, 1], float32.
        gt_path_L = os.path.join(self.gt_folder,
                                 '{:04}_L.png'.format(index + 1))
        gt_path_R = os.path.join(self.gt_folder,
                                 '{:04}_R.png'.format(index + 1))

        img_bytes = self.file_client.get(gt_path_L, 'gt')
        try:
            img_gt_L = imfrombytes(img_bytes, float32=True)
        except Exception:
            raise Exception("gt path {} not working".format(gt_path_L))

        img_bytes = self.file_client.get(gt_path_R, 'gt')
        try:
            img_gt_R = imfrombytes(img_bytes, float32=True)
        except Exception:
            raise Exception("gt path {} not working".format(gt_path_R))

        lq_path_L = os.path.join(self.lq_folder,
                                 '{:04}_L.png'.format(index + 1))
        lq_path_R = os.path.join(self.lq_folder,
                                 '{:04}_R.png'.format(index + 1))

        img_bytes = self.file_client.get(lq_path_L, 'lq')
        try:
            img_lq_L = imfrombytes(img_bytes, float32=True)
        except Exception:
            raise Exception("lq path {} not working".format(lq_path_L))

        img_bytes = self.file_client.get(lq_path_R, 'lq')
        try:
            img_lq_R = imfrombytes(img_bytes, float32=True)
        except Exception:
            raise Exception("lq path {} not working".format(lq_path_R))

        # stack the left and right views along the channel axis
        img_gt = np.concatenate([img_gt_L, img_gt_R], axis=-1)
        img_lq = np.concatenate([img_lq_L, img_lq_R], axis=-1)

        # augmentation for training
        if self.opt['phase'] == 'train':
            gt_size = self.opt['gt_size']
            # padding
            img_gt, img_lq = padding(img_gt, img_lq, gt_size)

            # random crop
            img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size,
                                                scale, gt_path_L)
            # flip, rotation
            img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_flip'],
                                     self.opt['use_rot'])

        # TODO: color space transform
        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt, img_lq = img2tensor([img_gt, img_lq],
                                    bgr2rgb=True,
                                    float32=True)
        # normalize
        if self.mean is not None or self.std is not None:
            normalize(img_lq, self.mean, self.std, inplace=True)
            normalize(img_gt, self.mean, self.std, inplace=True)

        return {
            'lq': img_lq,
            'gt': img_gt,
            'lq_path': f'{index + 1:04}',
            'gt_path': f'{index + 1:04}',
        }

    def __len__(self):
        # each sample consists of an _L and an _R file
        return self.nums // 2


class PairedStereoImageDataset(data.Dataset):
    '''
    Paired dataset for stereo SR (Flickr1024, KITTI, Middlebury)
    '''

    def __init__(self, opt):
        super(PairedStereoImageDataset, self).__init__()
        self.opt = opt
        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.mean = opt['mean'] if 'mean' in opt else None
        self.std = opt['std'] if 'std' in opt else None

        self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq']
        if 'filename_tmpl' in opt:
            self.filename_tmpl = opt['filename_tmpl']
        else:
            self.filename_tmpl = '{}'

        assert self.io_backend_opt['type'] == 'disk'
        # sort so that lq and gt sample folders line up deterministically
        self.lq_files = sorted(os.listdir(self.lq_folder))
        self.gt_files = sorted(os.listdir(self.gt_folder))

        self.nums = len(self.gt_files)

    def __getitem__(self, index):
        if self.file_client is None:
            self.file_client = FileClient(
                self.io_backend_opt.pop('type'), **self.io_backend_opt)

        gt_path_L = os.path.join(self.gt_folder, self.gt_files[index],
                                 'hr0.png')
        gt_path_R = os.path.join(self.gt_folder, self.gt_files[index],
                                 'hr1.png')

        img_bytes = self.file_client.get(gt_path_L, 'gt')
        try:
            img_gt_L = imfrombytes(img_bytes, float32=True)
        except Exception:
            raise Exception("gt path {} not working".format(gt_path_L))

        img_bytes = self.file_client.get(gt_path_R, 'gt')
        try:
            img_gt_R = imfrombytes(img_bytes, float32=True)
        except Exception:
            raise Exception("gt path {} not working".format(gt_path_R))

        lq_path_L = os.path.join(self.lq_folder, self.lq_files[index],
                                 'lr0.png')
        lq_path_R = os.path.join(self.lq_folder, self.lq_files[index],
                                 'lr1.png')

        img_bytes = self.file_client.get(lq_path_L, 'lq')
        try:
            img_lq_L = imfrombytes(img_bytes, float32=True)
        except Exception:
            raise Exception("lq path {} not working".format(lq_path_L))

        img_bytes = self.file_client.get(lq_path_R, 'lq')
        try:
            img_lq_R = imfrombytes(img_bytes, float32=True)
        except Exception:
            raise Exception("lq path {} not working".format(lq_path_R))

        # stack the left and right views along the channel axis
        img_gt = np.concatenate([img_gt_L, img_gt_R], axis=-1)
        img_lq = np.concatenate([img_lq_L, img_lq_R], axis=-1)

        scale = self.opt['scale']
        # augmentation for training
        if self.opt['phase'] == 'train':
            if 'gt_size_h' in self.opt and 'gt_size_w' in self.opt:
                gt_size_h = int(self.opt['gt_size_h'])
                gt_size_w = int(self.opt['gt_size_w'])
            else:
                gt_size = int(self.opt['gt_size'])
                gt_size_h, gt_size_w = gt_size, gt_size

            # randomly permute the RGB channels of both views
            if 'flip_RGB' in self.opt and self.opt['flip_RGB']:
                idx = [
                    [0, 1, 2, 3, 4, 5],
                    [0, 2, 1, 3, 5, 4],
                    [1, 0, 2, 4, 3, 5],
                    [1, 2, 0, 4, 5, 3],
                    [2, 0, 1, 5, 3, 4],
                    [2, 1, 0, 5, 4, 3],
                ][int(np.random.rand() * 6)]

                img_gt = img_gt[:, :, idx]
                img_lq = img_lq[:, :, idx]

            # random crop
            img_gt, img_lq = img_gt.copy(), img_lq.copy()
            img_gt, img_lq = paired_random_crop_hw(img_gt, img_lq, gt_size_h,
                                                   gt_size_w, scale,
                                                   'gt_path_L_and_R')
            # flip, rotation
            imgs, status = augment([img_gt, img_lq], self.opt['use_hflip'],
                                   self.opt['use_rot'],
                                   vflip=self.opt['use_vflip'],
                                   return_status=True)

            img_gt, img_lq = imgs

        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt, img_lq = img2tensor([img_gt, img_lq],
                                    bgr2rgb=True,
                                    float32=True)
        # normalize
        if self.mean is not None or self.std is not None:
            normalize(img_lq, self.mean, self.std, inplace=True)
            normalize(img_gt, self.mean, self.std, inplace=True)

        return {
            'lq': img_lq,
            'gt': img_gt,
            'lq_path': os.path.join(self.lq_folder, self.lq_files[index]),
            'gt_path': os.path.join(self.gt_folder, self.gt_files[index]),
        }

    def __len__(self):
        return self.nums
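A sketch of the directory layout and options PairedStereoImageDataset expects, inferred from the code above. The hr0/hr1 and lr0/lr1 filenames come from the code; the dataset root and patch sizes are illustrative.

# Assumed layout (one sub-folder per stereo sample):
#   datasets/Flickr1024/GT/0001/hr0.png    left HR view
#   datasets/Flickr1024/GT/0001/hr1.png    right HR view
#   datasets/Flickr1024/LQx4/0001/lr0.png  left LR view
#   datasets/Flickr1024/LQx4/0001/lr1.png  right LR view
opt = {
    'io_backend': {'type': 'disk'},   # the class asserts 'disk'
    'dataroot_gt': 'datasets/Flickr1024/GT',
    'dataroot_lq': 'datasets/Flickr1024/LQx4',
    'scale': 4,
    'phase': 'train',
    'gt_size': 120,       # or gt_size_h / gt_size_w for non-square patches
    'use_hflip': True,
    'use_vflip': True,
    'use_rot': False,
}
dataset = PairedStereoImageDataset(opt)  # requires the folders to exist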
NAFNet/basicsr/data/paired_image_dataset.py ADDED
@@ -0,0 +1,135 @@
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
5
+ # Copyright 2018-2020 BasicSR Authors
6
+ # ------------------------------------------------------------------------
7
+ from torch.utils import data as data
8
+ from torchvision.transforms.functional import normalize
9
+
10
+ from basicsr.data.data_util import (paired_paths_from_folder,
11
+ paired_paths_from_lmdb,
12
+ paired_paths_from_meta_info_file)
13
+ from basicsr.data.transforms import augment, paired_random_crop
14
+ from basicsr.utils import FileClient, imfrombytes, img2tensor, padding
15
+
16
+
17
+ class PairedImageDataset(data.Dataset):
18
+ """Paired image dataset for image restoration.
19
+
20
+ Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and
21
+ GT image pairs.
22
+
23
+ There are three modes:
24
+ 1. 'lmdb': Use lmdb files.
25
+ If opt['io_backend'] == lmdb.
26
+ 2. 'meta_info_file': Use meta information file to generate paths.
27
+ If opt['io_backend'] != lmdb and opt['meta_info_file'] is not None.
28
+ 3. 'folder': Scan folders to generate paths.
29
+ The rest.
30
+
31
+ Args:
32
+ opt (dict): Config for train datasets. It contains the following keys:
33
+ dataroot_gt (str): Data root path for gt.
34
+ dataroot_lq (str): Data root path for lq.
35
+ meta_info_file (str): Path for meta information file.
36
+ io_backend (dict): IO backend type and other kwarg.
37
+ filename_tmpl (str): Template for each filename. Note that the
38
+ template excludes the file extension. Default: '{}'.
39
+ gt_size (int): Cropped patched size for gt patches.
40
+ use_flip (bool): Use horizontal flips.
41
+ use_rot (bool): Use rotation (use vertical flip and transposing h
42
+ and w for implementation).
43
+
44
+ scale (bool): Scale, which will be added automatically.
45
+ phase (str): 'train' or 'val'.
46
+ """
47
+
48
+ def __init__(self, opt):
49
+ super(PairedImageDataset, self).__init__()
50
+ self.opt = opt
51
+ # file client (io backend)
52
+ self.file_client = None
53
+ self.io_backend_opt = opt['io_backend']
54
+ self.mean = opt['mean'] if 'mean' in opt else None
55
+ self.std = opt['std'] if 'std' in opt else None
56
+
57
+ self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq']
58
+ if 'filename_tmpl' in opt:
59
+ self.filename_tmpl = opt['filename_tmpl']
60
+ else:
61
+ self.filename_tmpl = '{}'
62
+
63
+ if self.io_backend_opt['type'] == 'lmdb':
64
+ self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder]
65
+ self.io_backend_opt['client_keys'] = ['lq', 'gt']
66
+ self.paths = paired_paths_from_lmdb(
67
+ [self.lq_folder, self.gt_folder], ['lq', 'gt'])
68
+ elif 'meta_info_file' in self.opt and self.opt[
69
+ 'meta_info_file'] is not None:
70
+ self.paths = paired_paths_from_meta_info_file(
71
+ [self.lq_folder, self.gt_folder], ['lq', 'gt'],
72
+ self.opt['meta_info_file'], self.filename_tmpl)
73
+ else:
74
+ self.paths = paired_paths_from_folder(
75
+ [self.lq_folder, self.gt_folder], ['lq', 'gt'],
76
+ self.filename_tmpl)
77
+
78
+ def __getitem__(self, index):
79
+ if self.file_client is None:
80
+ self.file_client = FileClient(
81
+ self.io_backend_opt.pop('type'), **self.io_backend_opt)
82
+
83
+ scale = self.opt['scale']
84
+
85
+ # Load gt and lq images. Dimension order: HWC; channel order: BGR;
86
+ # image range: [0, 1], float32.
87
+ gt_path = self.paths[index]['gt_path']
88
+ # print('gt path,', gt_path)
89
+ img_bytes = self.file_client.get(gt_path, 'gt')
90
+ try:
91
+ img_gt = imfrombytes(img_bytes, float32=True)
92
+ except:
93
+ raise Exception("gt path {} not working".format(gt_path))
94
+
95
+ lq_path = self.paths[index]['lq_path']
96
+ # print(', lq path', lq_path)
97
+ img_bytes = self.file_client.get(lq_path, 'lq')
98
+ try:
99
+ img_lq = imfrombytes(img_bytes, float32=True)
100
+ except:
101
+ raise Exception("lq path {} not working".format(lq_path))
102
+
103
+
104
+ # augmentation for training
105
+ if self.opt['phase'] == 'train':
106
+ gt_size = self.opt['gt_size']
107
+ # padding
108
+ img_gt, img_lq = padding(img_gt, img_lq, gt_size)
109
+
110
+ # random crop
111
+ img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale,
112
+ gt_path)
113
+ # flip, rotation
114
+ img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_flip'],
115
+ self.opt['use_rot'])
116
+
117
+ # TODO: color space transform
118
+ # BGR to RGB, HWC to CHW, numpy to tensor
119
+ img_gt, img_lq = img2tensor([img_gt, img_lq],
120
+ bgr2rgb=True,
121
+ float32=True)
122
+ # normalize
123
+ if self.mean is not None or self.std is not None:
124
+ normalize(img_lq, self.mean, self.std, inplace=True)
125
+ normalize(img_gt, self.mean, self.std, inplace=True)
126
+
127
+ return {
128
+ 'lq': img_lq,
129
+ 'gt': img_gt,
130
+ 'lq_path': lq_path,
131
+ 'gt_path': gt_path
132
+ }
133
+
134
+ def __len__(self):
135
+ return len(self.paths)
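
A minimal sketch of driving this dataset in 'folder' mode; the paths and patch size below are illustrative placeholders, not values taken from this commit:

    from basicsr.data.paired_image_dataset import PairedImageDataset

    opt = {
        'phase': 'train',
        'scale': 1,                      # deblurring/denoising pairs share one resolution
        'gt_size': 256,
        'use_flip': True,
        'use_rot': True,
        'io_backend': {'type': 'disk'},  # 'folder' mode scans the two roots for pairs
        'dataroot_gt': './datasets/GoPro/train/sharp',   # placeholder path
        'dataroot_lq': './datasets/GoPro/train/blur',    # placeholder path
    }
    dataset = PairedImageDataset(opt)
    sample = dataset[0]  # dict with 'lq', 'gt', 'lq_path', 'gt_path'
    print(sample['lq'].shape, sample['gt'].shape)  # CHW float tensors in [0, 1]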
NAFNet/basicsr/data/prefetch_dataloader.py ADDED
@@ -0,0 +1,132 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2022 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from BasicSR (https://github.com/xinntao/BasicSR)
+# Copyright 2018-2020 BasicSR Authors
+# ------------------------------------------------------------------------
+import queue as Queue
+import threading
+import torch
+from torch.utils.data import DataLoader
+
+
+class PrefetchGenerator(threading.Thread):
+    """A general prefetch generator.
+
+    Ref:
+    https://stackoverflow.com/questions/7323664/python-generator-pre-fetch
+
+    Args:
+        generator: Python generator.
+        num_prefetch_queue (int): Length of the prefetch queue.
+    """
+
+    def __init__(self, generator, num_prefetch_queue):
+        threading.Thread.__init__(self)
+        self.queue = Queue.Queue(num_prefetch_queue)
+        self.generator = generator
+        self.daemon = True
+        self.start()
+
+    def run(self):
+        for item in self.generator:
+            self.queue.put(item)
+        self.queue.put(None)
+
+    def __next__(self):
+        next_item = self.queue.get()
+        if next_item is None:
+            raise StopIteration
+        return next_item
+
+    def __iter__(self):
+        return self
+
+
+class PrefetchDataLoader(DataLoader):
+    """Prefetch version of dataloader.
+
+    Ref:
+    https://github.com/IgorSusmelj/pytorch-styleguide/issues/5#
+
+    TODO:
+    Need to test on single gpu and ddp (multi-gpu). There is a known issue in
+    ddp.
+
+    Args:
+        num_prefetch_queue (int): Length of the prefetch queue.
+        kwargs (dict): Other arguments for dataloader.
+    """
+
+    def __init__(self, num_prefetch_queue, **kwargs):
+        self.num_prefetch_queue = num_prefetch_queue
+        super(PrefetchDataLoader, self).__init__(**kwargs)
+
+    def __iter__(self):
+        return PrefetchGenerator(super().__iter__(), self.num_prefetch_queue)
+
+
+class CPUPrefetcher():
+    """CPU prefetcher.
+
+    Args:
+        loader: Dataloader.
+    """
+
+    def __init__(self, loader):
+        self.ori_loader = loader
+        self.loader = iter(loader)
+
+    def next(self):
+        try:
+            return next(self.loader)
+        except StopIteration:
+            return None
+
+    def reset(self):
+        self.loader = iter(self.ori_loader)
+
+
+class CUDAPrefetcher():
+    """CUDA prefetcher.
+
+    Ref:
+    https://github.com/NVIDIA/apex/issues/304#
+
+    It may consume more GPU memory.
+
+    Args:
+        loader: Dataloader.
+        opt (dict): Options.
+    """
+
+    def __init__(self, loader, opt):
+        self.ori_loader = loader
+        self.loader = iter(loader)
+        self.opt = opt
+        self.stream = torch.cuda.Stream()
+        self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
+        self.preload()
+
+    def preload(self):
+        try:
+            self.batch = next(self.loader)  # self.batch is a dict
+        except StopIteration:
+            self.batch = None
+            return None
+        # put tensors to gpu
+        with torch.cuda.stream(self.stream):
+            for k, v in self.batch.items():
+                if torch.is_tensor(v):
+                    self.batch[k] = self.batch[k].to(
+                        device=self.device, non_blocking=True)
+
+    def next(self):
+        torch.cuda.current_stream().wait_stream(self.stream)
+        batch = self.batch
+        self.preload()
+        return batch
+
+    def reset(self):
+        self.loader = iter(self.ori_loader)
+        self.preload()
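
All three prefetchers share a next()/reset() protocol that yields None at the end of an epoch. A sketch of the consuming loop, using a toy loader as an assumption for illustration (CUDAPrefetcher additionally expects dict batches and an opt containing 'num_gpu'):

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from basicsr.data.prefetch_dataloader import CPUPrefetcher

    loader = DataLoader(TensorDataset(torch.randn(8, 3, 32, 32)), batch_size=2)
    prefetcher = CPUPrefetcher(loader)

    prefetcher.reset()             # rewind at the start of each epoch
    batch = prefetcher.next()
    while batch is not None:
        # a training step would consume `batch` here
        batch = prefetcher.next()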
NAFNet/basicsr/data/reds_dataset.py ADDED
@@ -0,0 +1,243 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2022 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from BasicSR (https://github.com/xinntao/BasicSR)
+# Copyright 2018-2020 BasicSR Authors
+# ------------------------------------------------------------------------
+import numpy as np
+import random
+import torch
+from pathlib import Path
+from torch.utils import data as data
+
+from basicsr.data.transforms import augment, paired_random_crop
+from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
+from basicsr.utils.flow_util import dequantize_flow
+
+
+class REDSDataset(data.Dataset):
+    """REDS dataset for training.
+
+    The keys are generated from a meta info txt file.
+    basicsr/data/meta_info/meta_info_REDS_GT.txt
+
+    Each line contains:
+    1. subfolder (clip) name; 2. frame number; 3. image shape, separated by
+    a white space.
+    Examples:
+    000 100 (720,1280,3)
+    001 100 (720,1280,3)
+    ...
+
+    Key examples: "000/00000000"
+    GT (gt): Ground-Truth;
+    LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.
+
+    Args:
+        opt (dict): Config for train dataset. It contains the following keys:
+            dataroot_gt (str): Data root path for gt.
+            dataroot_lq (str): Data root path for lq.
+            dataroot_flow (str, optional): Data root path for flow.
+            meta_info_file (str): Path for meta information file.
+            val_partition (str): Validation partition types. 'REDS4' or
+                'official'.
+            io_backend (dict): IO backend type and other kwarg.
+
+            num_frame (int): Window size for input frames.
+            gt_size (int): Cropped patch size for gt patches.
+            interval_list (list): Interval list for temporal augmentation.
+            random_reverse (bool): Random reverse input frames.
+            use_flip (bool): Use horizontal flips.
+            use_rot (bool): Use rotation (use vertical flip and transposing h
+                and w for implementation).
+
+            scale (bool): Scale, which will be added automatically.
+    """
+
+    def __init__(self, opt):
+        super(REDSDataset, self).__init__()
+        self.opt = opt
+        self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(
+            opt['dataroot_lq'])
+        self.flow_root = Path(
+            opt['dataroot_flow']) if opt['dataroot_flow'] is not None else None
+        assert opt['num_frame'] % 2 == 1, (
+            f'num_frame should be odd number, but got {opt["num_frame"]}')
+        self.num_frame = opt['num_frame']
+        self.num_half_frames = opt['num_frame'] // 2
+
+        self.keys = []
+        with open(opt['meta_info_file'], 'r') as fin:
+            for line in fin:
+                folder, frame_num, _ = line.split(' ')
+                self.keys.extend(
+                    [f'{folder}/{i:08d}' for i in range(int(frame_num))])
+
+        # remove the video clips used in validation
+        if opt['val_partition'] == 'REDS4':
+            val_partition = ['000', '011', '015', '020']
+        elif opt['val_partition'] == 'official':
+            val_partition = [f'{v:03d}' for v in range(240, 270)]
+        else:
+            raise ValueError(
+                f'Wrong validation partition {opt["val_partition"]}. '
+                f"Supported ones are ['official', 'REDS4'].")
+        self.keys = [
+            v for v in self.keys if v.split('/')[0] not in val_partition
+        ]
+
+        # file client (io backend)
+        self.file_client = None
+        self.io_backend_opt = opt['io_backend']
+        self.is_lmdb = False
+        if self.io_backend_opt['type'] == 'lmdb':
+            self.is_lmdb = True
+            if self.flow_root is not None:
+                self.io_backend_opt['db_paths'] = [
+                    self.lq_root, self.gt_root, self.flow_root
+                ]
+                self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']
+            else:
+                self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
+                self.io_backend_opt['client_keys'] = ['lq', 'gt']
+
+        # temporal augmentation configs
+        self.interval_list = opt['interval_list']
+        self.random_reverse = opt['random_reverse']
+        interval_str = ','.join(str(x) for x in opt['interval_list'])
+        logger = get_root_logger()
+        logger.info(f'Temporal augmentation interval list: [{interval_str}]; '
+                    f'random reverse is {self.random_reverse}.')
+
+    def __getitem__(self, index):
+        if self.file_client is None:
+            self.file_client = FileClient(
+                self.io_backend_opt.pop('type'), **self.io_backend_opt)
+
+        scale = self.opt['scale']
+        gt_size = self.opt['gt_size']
+        key = self.keys[index]
+        clip_name, frame_name = key.split('/')  # key example: 000/00000000
+        center_frame_idx = int(frame_name)
+
+        # determine the neighboring frames
+        interval = random.choice(self.interval_list)
+
+        # ensure not exceeding the borders
+        start_frame_idx = center_frame_idx - self.num_half_frames * interval
+        end_frame_idx = center_frame_idx + self.num_half_frames * interval
+        # each clip has 100 frames starting from 0 to 99
+        while (start_frame_idx < 0) or (end_frame_idx > 99):
+            center_frame_idx = random.randint(0, 99)
+            start_frame_idx = (
+                center_frame_idx - self.num_half_frames * interval)
+            end_frame_idx = center_frame_idx + self.num_half_frames * interval
+        frame_name = f'{center_frame_idx:08d}'
+        neighbor_list = list(
+            range(center_frame_idx - self.num_half_frames * interval,
+                  center_frame_idx + self.num_half_frames * interval + 1,
+                  interval))
+        # random reverse
+        if self.random_reverse and random.random() < 0.5:
+            neighbor_list.reverse()
+
+        assert len(neighbor_list) == self.num_frame, (
+            f'Wrong length of neighbor list: {len(neighbor_list)}')
+
+        # get the GT frame (as the center frame)
+        if self.is_lmdb:
+            img_gt_path = f'{clip_name}/{frame_name}'
+        else:
+            img_gt_path = self.gt_root / clip_name / f'{frame_name}.png'
+        img_bytes = self.file_client.get(img_gt_path, 'gt')
+        img_gt = imfrombytes(img_bytes, float32=True)
+
+        # get the neighboring LQ frames
+        img_lqs = []
+        for neighbor in neighbor_list:
+            if self.is_lmdb:
+                img_lq_path = f'{clip_name}/{neighbor:08d}'
+            else:
+                img_lq_path = self.lq_root / clip_name / f'{neighbor:08d}.png'
+            img_bytes = self.file_client.get(img_lq_path, 'lq')
+            img_lq = imfrombytes(img_bytes, float32=True)
+            img_lqs.append(img_lq)
+
+        # get flows
+        if self.flow_root is not None:
+            img_flows = []
+            # read previous flows
+            for i in range(self.num_half_frames, 0, -1):
+                if self.is_lmdb:
+                    flow_path = f'{clip_name}/{frame_name}_p{i}'
+                else:
+                    flow_path = (
+                        self.flow_root / clip_name / f'{frame_name}_p{i}.png')
+                img_bytes = self.file_client.get(flow_path, 'flow')
+                cat_flow = imfrombytes(
+                    img_bytes, flag='grayscale',
+                    float32=False)  # uint8, [0, 255]
+                dx, dy = np.split(cat_flow, 2, axis=0)
+                flow = dequantize_flow(
+                    dx, dy, max_val=20,
+                    denorm=False)  # we use max_val 20 here.
+                img_flows.append(flow)
+            # read next flows
+            for i in range(1, self.num_half_frames + 1):
+                if self.is_lmdb:
+                    flow_path = f'{clip_name}/{frame_name}_n{i}'
+                else:
+                    flow_path = (
+                        self.flow_root / clip_name / f'{frame_name}_n{i}.png')
+                img_bytes = self.file_client.get(flow_path, 'flow')
+                cat_flow = imfrombytes(
+                    img_bytes, flag='grayscale',
+                    float32=False)  # uint8, [0, 255]
+                dx, dy = np.split(cat_flow, 2, axis=0)
+                flow = dequantize_flow(
+                    dx, dy, max_val=20,
+                    denorm=False)  # we use max_val 20 here.
+                img_flows.append(flow)
+
+            # for random crop, here, img_flows and img_lqs have the same
+            # spatial size
+            img_lqs.extend(img_flows)
+
+        # randomly crop
+        img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale,
+                                             img_gt_path)
+        if self.flow_root is not None:
+            img_lqs, img_flows = img_lqs[:self.num_frame], img_lqs[self.
+                                                                   num_frame:]
+
+        # augmentation - flip, rotate
+        img_lqs.append(img_gt)
+        if self.flow_root is not None:
+            img_results, img_flows = augment(img_lqs, self.opt['use_flip'],
+                                             self.opt['use_rot'], img_flows)
+        else:
+            img_results = augment(img_lqs, self.opt['use_flip'],
+                                  self.opt['use_rot'])
+
+        img_results = img2tensor(img_results)
+        img_lqs = torch.stack(img_results[0:-1], dim=0)
+        img_gt = img_results[-1]
+
+        if self.flow_root is not None:
+            img_flows = img2tensor(img_flows)
+            # add the zero center flow
+            img_flows.insert(self.num_half_frames,
+                             torch.zeros_like(img_flows[0]))
+            img_flows = torch.stack(img_flows, dim=0)
+
+        # img_lqs: (t, c, h, w)
+        # img_flows: (t, 2, h, w)
+        # img_gt: (c, h, w)
+        # key: str
+        if self.flow_root is not None:
+            return {'lq': img_lqs, 'flow': img_flows, 'gt': img_gt, 'key': key}
+        else:
+            return {'lq': img_lqs, 'gt': img_gt, 'key': key}
+
+    def __len__(self):
+        return len(self.keys)
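
For reference, a sketch of the options dict this dataset consumes; the data roots and sizes are placeholders, while the meta file path matches the one named in the docstring:

    from basicsr.data.reds_dataset import REDSDataset

    opt = {
        'dataroot_gt': 'datasets/REDS/train_sharp',    # placeholder
        'dataroot_lq': 'datasets/REDS/train_blur',     # placeholder
        'dataroot_flow': None,            # set a path to also load quantized flows
        'meta_info_file': 'basicsr/data/meta_info/meta_info_REDS_GT.txt',
        'val_partition': 'REDS4',         # holds out clips 000, 011, 015, 020
        'io_backend': {'type': 'disk'},
        'num_frame': 5,                   # must be odd; the GT is the center frame
        'gt_size': 256,
        'interval_list': [1],
        'random_reverse': False,
        'use_flip': True,
        'use_rot': True,
        'scale': 1,                       # illustrative; normally injected by the framework
    }
    dataset = REDSDataset(opt)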
NAFNet/basicsr/data/single_image_dataset.py ADDED
@@ -0,0 +1,73 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2022 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from BasicSR (https://github.com/xinntao/BasicSR)
+# Copyright 2018-2020 BasicSR Authors
+# ------------------------------------------------------------------------
+from os import path as osp
+from torch.utils import data as data
+from torchvision.transforms.functional import normalize
+
+from basicsr.data.data_util import paths_from_lmdb
+from basicsr.utils import FileClient, imfrombytes, img2tensor, scandir
+
+
+class SingleImageDataset(data.Dataset):
+    """Read only lq images in the test phase.
+
+    Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc).
+
+    There are two modes:
+    1. 'meta_info_file': Use meta information file to generate paths.
+    2. 'folder': Scan folders to generate paths.
+
+    Args:
+        opt (dict): Config for train datasets. It contains the following keys:
+            dataroot_lq (str): Data root path for lq.
+            meta_info_file (str): Path for meta information file.
+            io_backend (dict): IO backend type and other kwarg.
+    """
+
+    def __init__(self, opt):
+        super(SingleImageDataset, self).__init__()
+        self.opt = opt
+        # file client (io backend)
+        self.file_client = None
+        self.io_backend_opt = opt['io_backend']
+        self.mean = opt['mean'] if 'mean' in opt else None
+        self.std = opt['std'] if 'std' in opt else None
+        self.lq_folder = opt['dataroot_lq']
+
+        if self.io_backend_opt['type'] == 'lmdb':
+            self.io_backend_opt['db_paths'] = [self.lq_folder]
+            self.io_backend_opt['client_keys'] = ['lq']
+            self.paths = paths_from_lmdb(self.lq_folder)
+        elif 'meta_info_file' in self.opt:
+            with open(self.opt['meta_info_file'], 'r') as fin:
+                self.paths = [
+                    osp.join(self.lq_folder,
+                             line.split(' ')[0]) for line in fin
+                ]
+        else:
+            self.paths = sorted(list(scandir(self.lq_folder, full_path=True)))
+
+    def __getitem__(self, index):
+        if self.file_client is None:
+            self.file_client = FileClient(
+                self.io_backend_opt.pop('type'), **self.io_backend_opt)
+
+        # load lq image
+        lq_path = self.paths[index]
+        img_bytes = self.file_client.get(lq_path, 'lq')
+        img_lq = imfrombytes(img_bytes, float32=True)
+
+        # TODO: color space transform
+        # BGR to RGB, HWC to CHW, numpy to tensor
+        img_lq = img2tensor(img_lq, bgr2rgb=True, float32=True)
+        # normalize
+        if self.mean is not None or self.std is not None:
+            normalize(img_lq, self.mean, self.std, inplace=True)
+        return {'lq': img_lq, 'lq_path': lq_path}
+
+    def __len__(self):
+        return len(self.paths)
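
A short folder-mode sketch for inference-time use; the input directory is a placeholder:

    from basicsr.data.single_image_dataset import SingleImageDataset

    opt = {
        'io_backend': {'type': 'disk'},
        'dataroot_lq': './demo/input',   # placeholder path
    }
    dataset = SingleImageDataset(opt)
    item = dataset[0]   # {'lq': CHW float tensor in [0, 1], 'lq_path': str}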
NAFNet/basicsr/data/transforms.py ADDED
@@ -0,0 +1,247 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2022 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from BasicSR (https://github.com/xinntao/BasicSR)
+# Copyright 2018-2020 BasicSR Authors
+# ------------------------------------------------------------------------
+import cv2
+import random
+from cv2 import rotate
+import numpy as np
+
+
+def mod_crop(img, scale):
+    """Mod crop images, used during testing.
+
+    Args:
+        img (ndarray): Input image.
+        scale (int): Scale factor.
+
+    Returns:
+        ndarray: Result image.
+    """
+    img = img.copy()
+    if img.ndim in (2, 3):
+        h, w = img.shape[0], img.shape[1]
+        h_remainder, w_remainder = h % scale, w % scale
+        img = img[:h - h_remainder, :w - w_remainder, ...]
+    else:
+        raise ValueError(f'Wrong img ndim: {img.ndim}.')
+    return img
+
+
+def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path):
+    """Paired random crop.
+
+    It crops lists of lq and gt images with corresponding locations.
+
+    Args:
+        img_gts (list[ndarray] | ndarray): GT images. Note that all images
+            should have the same shape. If the input is an ndarray, it will
+            be transformed to a list containing itself.
+        img_lqs (list[ndarray] | ndarray): LQ images. Note that all images
+            should have the same shape. If the input is an ndarray, it will
+            be transformed to a list containing itself.
+        gt_patch_size (int): GT patch size.
+        scale (int): Scale factor.
+        gt_path (str): Path to ground-truth.
+
+    Returns:
+        list[ndarray] | ndarray: GT images and LQ images. If returned results
+            only have one element, just return ndarray.
+    """
+
+    if not isinstance(img_gts, list):
+        img_gts = [img_gts]
+    if not isinstance(img_lqs, list):
+        img_lqs = [img_lqs]
+
+    h_lq, w_lq, _ = img_lqs[0].shape
+    h_gt, w_gt, _ = img_gts[0].shape
+    lq_patch_size = gt_patch_size // scale
+
+    if h_gt != h_lq * scale or w_gt != w_lq * scale:
+        raise ValueError(
+            f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x '
+            f'multiplication of LQ ({h_lq}, {w_lq}).')
+    if h_lq < lq_patch_size or w_lq < lq_patch_size:
+        raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
+                         f'({lq_patch_size}, {lq_patch_size}). '
+                         f'Please remove {gt_path}.')
+
+    # randomly choose top and left coordinates for lq patch
+    top = random.randint(0, h_lq - lq_patch_size)
+    left = random.randint(0, w_lq - lq_patch_size)
+
+    # crop lq patch
+    img_lqs = [
+        v[top:top + lq_patch_size, left:left + lq_patch_size, ...]
+        for v in img_lqs
+    ]
+
+    # crop corresponding gt patch
+    top_gt, left_gt = int(top * scale), int(left * scale)
+    img_gts = [
+        v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...]
+        for v in img_gts
+    ]
+    if len(img_gts) == 1:
+        img_gts = img_gts[0]
+    if len(img_lqs) == 1:
+        img_lqs = img_lqs[0]
+    return img_gts, img_lqs
+
+
+def paired_random_crop_hw(img_gts, img_lqs, gt_patch_size_h, gt_patch_size_w, scale, gt_path):
+    """Paired random crop with distinct patch height and width.
+
+    It crops lists of lq and gt images with corresponding locations.
+
+    Args:
+        img_gts (list[ndarray] | ndarray): GT images. Note that all images
+            should have the same shape. If the input is an ndarray, it will
+            be transformed to a list containing itself.
+        img_lqs (list[ndarray] | ndarray): LQ images. Note that all images
+            should have the same shape. If the input is an ndarray, it will
+            be transformed to a list containing itself.
+        gt_patch_size_h / gt_patch_size_w (int): GT patch height / width.
+        scale (int): Scale factor.
+        gt_path (str): Path to ground-truth.
+
+    Returns:
+        list[ndarray] | ndarray: GT images and LQ images. If returned results
+            only have one element, just return ndarray.
+    """
+
+    if not isinstance(img_gts, list):
+        img_gts = [img_gts]
+    if not isinstance(img_lqs, list):
+        img_lqs = [img_lqs]
+
+    h_lq, w_lq, _ = img_lqs[0].shape
+    h_gt, w_gt, _ = img_gts[0].shape
+    lq_patch_size_h = gt_patch_size_h // scale
+    lq_patch_size_w = gt_patch_size_w // scale
+
+    # if h_gt != h_lq * scale or w_gt != w_lq * scale:
+    #     raise ValueError(
+    #         f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',
+    #         f'multiplication of LQ ({h_lq}, {w_lq}).')
+    # if h_lq < lq_patch_size or w_lq < lq_patch_size:
+    #     raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
+    #                      f'({lq_patch_size}, {lq_patch_size}). '
+    #                      f'Please remove {gt_path}.')
+
+    # randomly choose top and left coordinates for lq patch
+    top = random.randint(0, h_lq - lq_patch_size_h)
+    left = random.randint(0, w_lq - lq_patch_size_w)
+
+    # crop lq patch
+    img_lqs = [
+        v[top:top + lq_patch_size_h, left:left + lq_patch_size_w, ...]
+        for v in img_lqs
+    ]
+
+    # crop corresponding gt patch
+    top_gt, left_gt = int(top * scale), int(left * scale)
+    img_gts = [
+        v[top_gt:top_gt + gt_patch_size_h, left_gt:left_gt + gt_patch_size_w, ...]
+        for v in img_gts
+    ]
+    if len(img_gts) == 1:
+        img_gts = img_gts[0]
+    if len(img_lqs) == 1:
+        img_lqs = img_lqs[0]
+    return img_gts, img_lqs
+
+def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False, vflip=False):
+    """Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).
+
+    We use vertical flip and transpose for rotation implementation.
+    All the images in the list use the same augmentation.
+
+    Args:
+        imgs (list[ndarray] | ndarray): Images to be augmented. If the input
+            is an ndarray, it will be transformed to a list.
+        hflip (bool): Horizontal flip. Default: True.
+        rotation (bool): Rotation. Default: True.
+        flows (list[ndarray]): Flows to be augmented. If the input is an
+            ndarray, it will be transformed to a list.
+            Dimension is (h, w, 2). Default: None.
+        return_status (bool): Return the status of flip and rotation.
+            Default: False.
+
+    Returns:
+        list[ndarray] | ndarray: Augmented images and flows. If returned
+            results only have one element, just return ndarray.
+
+    """
+    hflip = hflip and random.random() < 0.5
+    if vflip or rotation:
+        vflip = random.random() < 0.5
+    rot90 = rotation and random.random() < 0.5
+
+    def _augment(img):
+        if hflip:  # horizontal
+            cv2.flip(img, 1, img)
+            if img.shape[2] == 6:
+                img = img[:, :, [3, 4, 5, 0, 1, 2]].copy()  # swap left/right
+        if vflip:  # vertical
+            cv2.flip(img, 0, img)
+        if rot90:
+            img = img.transpose(1, 0, 2)
+        return img
+
+    def _augment_flow(flow):
+        if hflip:  # horizontal
+            cv2.flip(flow, 1, flow)
+            flow[:, :, 0] *= -1
+        if vflip:  # vertical
+            cv2.flip(flow, 0, flow)
+            flow[:, :, 1] *= -1
+        if rot90:
+            flow = flow.transpose(1, 0, 2)
+            flow = flow[:, :, [1, 0]]
+        return flow
+
+    if not isinstance(imgs, list):
+        imgs = [imgs]
+    imgs = [_augment(img) for img in imgs]
+    if len(imgs) == 1:
+        imgs = imgs[0]
+
+    if flows is not None:
+        if not isinstance(flows, list):
+            flows = [flows]
+        flows = [_augment_flow(flow) for flow in flows]
+        if len(flows) == 1:
+            flows = flows[0]
+        return imgs, flows
+    else:
+        if return_status:
+            return imgs, (hflip, vflip, rot90)
+        else:
+            return imgs
+
+
+def img_rotate(img, angle, center=None, scale=1.0):
+    """Rotate image.
+
+    Args:
+        img (ndarray): Image to be rotated.
+        angle (float): Rotation angle in degrees. Positive values mean
+            counter-clockwise rotation.
+        center (tuple[int]): Rotation center. If the center is None,
+            initialize it as the center of the image. Default: None.
+        scale (float): Isotropic scale factor. Default: 1.0.
+    """
+    (h, w) = img.shape[:2]
+
+    if center is None:
+        center = (w // 2, h // 2)
+
+    matrix = cv2.getRotationMatrix2D(center, angle, scale)
+    rotated_img = cv2.warpAffine(img, matrix, (w, h))
+    return rotated_img
+
+
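The two helpers above compose exactly as the datasets use them; a self-contained check on synthetic arrays (square patches keep shapes stable even when the random rot90 branch fires):

    import numpy as np
    from basicsr.data.transforms import augment, paired_random_crop

    scale = 2
    img_lq = np.random.rand(64, 64, 3).astype(np.float32)
    img_gt = np.random.rand(128, 128, 3).astype(np.float32)

    # a 32x32 GT patch implies a 16x16 LQ patch at scale 2
    img_gt, img_lq = paired_random_crop(img_gt, img_lq, 32, scale, 'dummy_path')
    img_gt, img_lq = augment([img_gt, img_lq], hflip=True, rotation=True)
    print(img_lq.shape, img_gt.shape)   # (16, 16, 3) (32, 32, 3)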
@@ -0,0 +1,331 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
5
+ # Copyright 2018-2020 BasicSR Authors
6
+ # ------------------------------------------------------------------------
7
+ import glob
8
+ import torch
9
+ from os import path as osp
10
+ from torch.utils import data as data
11
+
12
+ from basicsr.data.data_util import (duf_downsample, generate_frame_indices,
13
+ read_img_seq)
14
+ from basicsr.utils import get_root_logger, scandir
15
+
16
+
17
+ class VideoTestDataset(data.Dataset):
18
+ """Video test dataset.
19
+
20
+ Supported datasets: Vid4, REDS4, REDSofficial.
21
+ More generally, it supports testing dataset with following structures:
22
+
23
+ dataroot
24
+ ├── subfolder1
25
+ ├── frame000
26
+ ├── frame001
27
+ ├── ...
28
+ ├── subfolder1
29
+ ├── frame000
30
+ ├── frame001
31
+ ├── ...
32
+ ├── ...
33
+
34
+ For testing datasets, there is no need to prepare LMDB files.
35
+
36
+ Args:
37
+ opt (dict): Config for train dataset. It contains the following keys:
38
+ dataroot_gt (str): Data root path for gt.
39
+ dataroot_lq (str): Data root path for lq.
40
+ io_backend (dict): IO backend type and other kwarg.
41
+ cache_data (bool): Whether to cache testing datasets.
42
+ name (str): Dataset name.
43
+ meta_info_file (str): The path to the file storing the list of test
44
+ folders. If not provided, all the folders in the dataroot will
45
+ be used.
46
+ num_frame (int): Window size for input frames.
47
+ padding (str): Padding mode.
48
+ """
49
+
50
+ def __init__(self, opt):
51
+ super(VideoTestDataset, self).__init__()
52
+ self.opt = opt
53
+ self.cache_data = opt['cache_data']
54
+ self.gt_root, self.lq_root = opt['dataroot_gt'], opt['dataroot_lq']
55
+ self.data_info = {
56
+ 'lq_path': [],
57
+ 'gt_path': [],
58
+ 'folder': [],
59
+ 'idx': [],
60
+ 'border': []
61
+ }
62
+ # file client (io backend)
63
+ self.file_client = None
64
+ self.io_backend_opt = opt['io_backend']
65
+ assert self.io_backend_opt[
66
+ 'type'] != 'lmdb', 'No need to use lmdb during validation/test.'
67
+
68
+ logger = get_root_logger()
69
+ logger.info(f'Generate data info for VideoTestDataset - {opt["name"]}')
70
+ self.imgs_lq, self.imgs_gt = {}, {}
71
+ if 'meta_info_file' in opt:
72
+ with open(opt['meta_info_file'], 'r') as fin:
73
+ subfolders = [line.split(' ')[0] for line in fin]
74
+ subfolders_lq = [
75
+ osp.join(self.lq_root, key) for key in subfolders
76
+ ]
77
+ subfolders_gt = [
78
+ osp.join(self.gt_root, key) for key in subfolders
79
+ ]
80
+ else:
81
+ subfolders_lq = sorted(glob.glob(osp.join(self.lq_root, '*')))
82
+ subfolders_gt = sorted(glob.glob(osp.join(self.gt_root, '*')))
83
+
84
+ if opt['name'].lower() in ['vid4', 'reds4', 'redsofficial']:
85
+ for subfolder_lq, subfolder_gt in zip(subfolders_lq,
86
+ subfolders_gt):
87
+ # get frame list for lq and gt
88
+ subfolder_name = osp.basename(subfolder_lq)
89
+ img_paths_lq = sorted(
90
+ list(scandir(subfolder_lq, full_path=True)))
91
+ img_paths_gt = sorted(
92
+ list(scandir(subfolder_gt, full_path=True)))
93
+
94
+ max_idx = len(img_paths_lq)
95
+ assert max_idx == len(img_paths_gt), (
96
+ f'Different number of images in lq ({max_idx})'
97
+ f' and gt folders ({len(img_paths_gt)})')
98
+
99
+ self.data_info['lq_path'].extend(img_paths_lq)
100
+ self.data_info['gt_path'].extend(img_paths_gt)
101
+ self.data_info['folder'].extend([subfolder_name] * max_idx)
102
+ for i in range(max_idx):
103
+ self.data_info['idx'].append(f'{i}/{max_idx}')
104
+ border_l = [0] * max_idx
105
+ for i in range(self.opt['num_frame'] // 2):
106
+ border_l[i] = 1
107
+ border_l[max_idx - i - 1] = 1
108
+ self.data_info['border'].extend(border_l)
109
+
110
+ # cache data or save the frame list
111
+ if self.cache_data:
112
+ logger.info(
113
+ f'Cache {subfolder_name} for VideoTestDataset...')
114
+ self.imgs_lq[subfolder_name] = read_img_seq(img_paths_lq)
115
+ self.imgs_gt[subfolder_name] = read_img_seq(img_paths_gt)
116
+ else:
117
+ self.imgs_lq[subfolder_name] = img_paths_lq
118
+ self.imgs_gt[subfolder_name] = img_paths_gt
119
+ else:
120
+ raise ValueError(
121
+ f'Non-supported video test dataset: {type(opt["name"])}')
122
+
123
+ def __getitem__(self, index):
124
+ folder = self.data_info['folder'][index]
125
+ idx, max_idx = self.data_info['idx'][index].split('/')
126
+ idx, max_idx = int(idx), int(max_idx)
127
+ border = self.data_info['border'][index]
128
+ lq_path = self.data_info['lq_path'][index]
129
+
130
+ select_idx = generate_frame_indices(
131
+ idx, max_idx, self.opt['num_frame'], padding=self.opt['padding'])
132
+
133
+ if self.cache_data:
134
+ imgs_lq = self.imgs_lq[folder].index_select(
135
+ 0, torch.LongTensor(select_idx))
136
+ img_gt = self.imgs_gt[folder][idx]
137
+ else:
138
+ img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx]
139
+ imgs_lq = read_img_seq(img_paths_lq)
140
+ img_gt = read_img_seq([self.imgs_gt[folder][idx]])
141
+ img_gt.squeeze_(0)
142
+
143
+ return {
144
+ 'lq': imgs_lq, # (t, c, h, w)
145
+ 'gt': img_gt, # (c, h, w)
146
+ 'folder': folder, # folder name
147
+ 'idx': self.data_info['idx'][index], # e.g., 0/99
148
+ 'border': border, # 1 for border, 0 for non-border
149
+ 'lq_path': lq_path # center frame
150
+ }
151
+
152
+ def __len__(self):
153
+ return len(self.data_info['gt_path'])
154
+
155
+
156
+ class VideoTestVimeo90KDataset(data.Dataset):
157
+ """Video test dataset for Vimeo90k-Test dataset.
158
+
159
+ It only keeps the center frame for testing.
160
+ For testing datasets, there is no need to prepare LMDB files.
161
+
162
+ Args:
163
+ opt (dict): Config for train dataset. It contains the following keys:
164
+ dataroot_gt (str): Data root path for gt.
165
+ dataroot_lq (str): Data root path for lq.
166
+ io_backend (dict): IO backend type and other kwarg.
167
+ cache_data (bool): Whether to cache testing datasets.
168
+ name (str): Dataset name.
169
+ meta_info_file (str): The path to the file storing the list of test
170
+ folders. If not provided, all the folders in the dataroot will
171
+ be used.
172
+ num_frame (int): Window size for input frames.
173
+ padding (str): Padding mode.
174
+ """
175
+
176
+ def __init__(self, opt):
177
+ super(VideoTestVimeo90KDataset, self).__init__()
178
+ self.opt = opt
179
+ self.cache_data = opt['cache_data']
180
+ if self.cache_data:
181
+ raise NotImplementedError(
182
+ 'cache_data in Vimeo90K-Test dataset is not implemented.')
183
+ self.gt_root, self.lq_root = opt['dataroot_gt'], opt['dataroot_lq']
184
+ self.data_info = {
185
+ 'lq_path': [],
186
+ 'gt_path': [],
187
+ 'folder': [],
188
+ 'idx': [],
189
+ 'border': []
190
+ }
191
+ neighbor_list = [
192
+ i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])
193
+ ]
194
+
195
+ # file client (io backend)
196
+ self.file_client = None
197
+ self.io_backend_opt = opt['io_backend']
198
+ assert self.io_backend_opt[
199
+ 'type'] != 'lmdb', 'No need to use lmdb during validation/test.'
200
+
201
+ logger = get_root_logger()
202
+ logger.info(f'Generate data info for VideoTestDataset - {opt["name"]}')
203
+ with open(opt['meta_info_file'], 'r') as fin:
204
+ subfolders = [line.split(' ')[0] for line in fin]
205
+ for idx, subfolder in enumerate(subfolders):
206
+ gt_path = osp.join(self.gt_root, subfolder, 'im4.png')
207
+ self.data_info['gt_path'].append(gt_path)
208
+ lq_paths = [
209
+ osp.join(self.lq_root, subfolder, f'im{i}.png')
210
+ for i in neighbor_list
211
+ ]
212
+ self.data_info['lq_path'].append(lq_paths)
213
+ self.data_info['folder'].append('vimeo90k')
214
+ self.data_info['idx'].append(f'{idx}/{len(subfolders)}')
215
+ self.data_info['border'].append(0)
216
+
217
+ def __getitem__(self, index):
218
+ lq_path = self.data_info['lq_path'][index]
219
+ gt_path = self.data_info['gt_path'][index]
220
+ imgs_lq = read_img_seq(lq_path)
221
+ img_gt = read_img_seq([gt_path])
222
+ img_gt.squeeze_(0)
223
+
224
+ return {
225
+ 'lq': imgs_lq, # (t, c, h, w)
226
+ 'gt': img_gt, # (c, h, w)
227
+ 'folder': self.data_info['folder'][index], # folder name
228
+ 'idx': self.data_info['idx'][index], # e.g., 0/843
229
+ 'border': self.data_info['border'][index], # 0 for non-border
230
+ 'lq_path': lq_path[self.opt['num_frame'] // 2] # center frame
231
+ }
232
+
233
+ def __len__(self):
234
+ return len(self.data_info['gt_path'])
235
+
236
+
237
+ class VideoTestDUFDataset(VideoTestDataset):
238
+ """ Video test dataset for DUF dataset.
239
+
240
+ Args:
241
+ opt (dict): Config for train dataset.
242
+ Most of keys are the same as VideoTestDataset.
243
+ It has the follwing extra keys:
244
+
245
+ use_duf_downsampling (bool): Whether to use duf downsampling to
246
+ generate low-resolution frames.
247
+ scale (bool): Scale, which will be added automatically.
248
+ """
249
+
250
+ def __getitem__(self, index):
251
+ folder = self.data_info['folder'][index]
252
+ idx, max_idx = self.data_info['idx'][index].split('/')
253
+ idx, max_idx = int(idx), int(max_idx)
254
+ border = self.data_info['border'][index]
255
+ lq_path = self.data_info['lq_path'][index]
256
+
257
+ select_idx = generate_frame_indices(
258
+ idx, max_idx, self.opt['num_frame'], padding=self.opt['padding'])
259
+
260
+ if self.cache_data:
261
+ if self.opt['use_duf_downsampling']:
262
+ # read imgs_gt to generate low-resolution frames
263
+ imgs_lq = self.imgs_gt[folder].index_select(
264
+ 0, torch.LongTensor(select_idx))
265
+ imgs_lq = duf_downsample(
266
+ imgs_lq, kernel_size=13, scale=self.opt['scale'])
267
+ else:
268
+ imgs_lq = self.imgs_lq[folder].index_select(
269
+ 0, torch.LongTensor(select_idx))
270
+ img_gt = self.imgs_gt[folder][idx]
271
+ else:
272
+ if self.opt['use_duf_downsampling']:
273
+ img_paths_lq = [self.imgs_gt[folder][i] for i in select_idx]
274
+ # read imgs_gt to generate low-resolution frames
275
+ imgs_lq = read_img_seq(
276
+ img_paths_lq,
277
+ require_mod_crop=True,
278
+ scale=self.opt['scale'])
279
+ imgs_lq = duf_downsample(
280
+ imgs_lq, kernel_size=13, scale=self.opt['scale'])
281
+ else:
282
+ img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx]
283
+ imgs_lq = read_img_seq(img_paths_lq)
284
+ img_gt = read_img_seq([self.imgs_gt[folder][idx]],
285
+ require_mod_crop=True,
286
+ scale=self.opt['scale'])
287
+ img_gt.squeeze_(0)
288
+
289
+ return {
290
+ 'lq': imgs_lq, # (t, c, h, w)
291
+ 'gt': img_gt, # (c, h, w)
292
+ 'folder': folder, # folder name
293
+ 'idx': self.data_info['idx'][index], # e.g., 0/99
294
+ 'border': border, # 1 for border, 0 for non-border
295
+ 'lq_path': lq_path # center frame
296
+ }
297
+
298
+
299
+ class VideoRecurrentTestDataset(VideoTestDataset):
300
+ """Video test dataset for recurrent architectures, which takes LR video
301
+ frames as input and output corresponding HR video frames.
302
+
303
+ Args:
304
+ Same as VideoTestDataset.
305
+ Unused opt:
306
+ padding (str): Padding mode.
307
+
308
+ """
309
+
310
+ def __init__(self, opt):
311
+ super(VideoRecurrentTestDataset, self).__init__(opt)
312
+ # Find unique folder strings
313
+ self.folders = sorted(list(set(self.data_info['folder'])))
314
+
315
+ def __getitem__(self, index):
316
+ folder = self.folders[index]
317
+
318
+ if self.cache_data:
319
+ imgs_lq = self.imgs_lq[folder]
320
+ imgs_gt = self.imgs_gt[folder]
321
+ else:
322
+ raise NotImplementedError('Without cache_data is not implemented.')
323
+
324
+ return {
325
+ 'lq': imgs_lq,
326
+ 'gt': imgs_gt,
327
+ 'folder': folder,
328
+ }
329
+
330
+ def __len__(self):
331
+ return len(self.folders)
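
A sketch of a REDS4-style test config for VideoTestDataset; the paths are placeholders, and the padding mode is assumed to be one of the values accepted by generate_frame_indices in the BasicSR data utilities:

    opt = {
        'name': 'REDS4',                 # must be vid4, reds4, or redsofficial
        'dataroot_gt': 'datasets/REDS4/GT',     # placeholder
        'dataroot_lq': 'datasets/REDS4/blur',   # placeholder
        'io_backend': {'type': 'disk'},  # lmdb is rejected for testing
        'cache_data': False,
        'num_frame': 5,
        'padding': 'reflection_circle',  # assumed padding mode
    }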
NAFNet/basicsr/data/vimeo90k_dataset.py ADDED
@@ -0,0 +1,136 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2022 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from BasicSR (https://github.com/xinntao/BasicSR)
+# Copyright 2018-2020 BasicSR Authors
+# ------------------------------------------------------------------------
+import random
+import torch
+from pathlib import Path
+from torch.utils import data as data
+
+from basicsr.data.transforms import augment, paired_random_crop
+from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
+
+
+class Vimeo90KDataset(data.Dataset):
+    """Vimeo90K dataset for training.
+
+    The keys are generated from a meta info txt file.
+    basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt
+
+    Each line contains:
+    1. clip name; 2. frame number; 3. image shape, separated by a white space.
+    Examples:
+    00001/0001 7 (256,448,3)
+    00001/0002 7 (256,448,3)
+
+    Key examples: "00001/0001"
+    GT (gt): Ground-Truth;
+    LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.
+
+    The neighboring frame list for different num_frame:
+    num_frame | frame list
+            1 | 4
+            3 | 3,4,5
+            5 | 2,3,4,5,6
+            7 | 1,2,3,4,5,6,7
+
+    Args:
+        opt (dict): Config for train dataset. It contains the following keys:
+            dataroot_gt (str): Data root path for gt.
+            dataroot_lq (str): Data root path for lq.
+            meta_info_file (str): Path for meta information file.
+            io_backend (dict): IO backend type and other kwarg.
+
+            num_frame (int): Window size for input frames.
+            gt_size (int): Cropped patch size for gt patches.
+            random_reverse (bool): Random reverse input frames.
+            use_flip (bool): Use horizontal flips.
+            use_rot (bool): Use rotation (use vertical flip and transposing h
+                and w for implementation).
+
+            scale (bool): Scale, which will be added automatically.
+    """
+
+    def __init__(self, opt):
+        super(Vimeo90KDataset, self).__init__()
+        self.opt = opt
+        self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(
+            opt['dataroot_lq'])
+
+        with open(opt['meta_info_file'], 'r') as fin:
+            self.keys = [line.split(' ')[0] for line in fin]
+
+        # file client (io backend)
+        self.file_client = None
+        self.io_backend_opt = opt['io_backend']
+        self.is_lmdb = False
+        if self.io_backend_opt['type'] == 'lmdb':
+            self.is_lmdb = True
+            self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
+            self.io_backend_opt['client_keys'] = ['lq', 'gt']
+
+        # indices of input images
+        self.neighbor_list = [
+            i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])
+        ]
+
+        # temporal augmentation configs
+        self.random_reverse = opt['random_reverse']
+        logger = get_root_logger()
+        logger.info(f'Random reverse is {self.random_reverse}.')
+
+    def __getitem__(self, index):
+        if self.file_client is None:
+            self.file_client = FileClient(
+                self.io_backend_opt.pop('type'), **self.io_backend_opt)
+
+        # random reverse
+        if self.random_reverse and random.random() < 0.5:
+            self.neighbor_list.reverse()
+
+        scale = self.opt['scale']
+        gt_size = self.opt['gt_size']
+        key = self.keys[index]
+        clip, seq = key.split('/')  # key example: 00001/0001
+
+        # get the GT frame (im4.png)
+        if self.is_lmdb:
+            img_gt_path = f'{key}/im4'
+        else:
+            img_gt_path = self.gt_root / clip / seq / 'im4.png'
+        img_bytes = self.file_client.get(img_gt_path, 'gt')
+        img_gt = imfrombytes(img_bytes, float32=True)
+
+        # get the neighboring LQ frames
+        img_lqs = []
+        for neighbor in self.neighbor_list:
+            if self.is_lmdb:
+                img_lq_path = f'{clip}/{seq}/im{neighbor}'
+            else:
+                img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png'
+            img_bytes = self.file_client.get(img_lq_path, 'lq')
+            img_lq = imfrombytes(img_bytes, float32=True)
+            img_lqs.append(img_lq)
+
+        # randomly crop
+        img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale,
+                                             img_gt_path)
+
+        # augmentation - flip, rotate
+        img_lqs.append(img_gt)
+        img_results = augment(img_lqs, self.opt['use_flip'],
+                              self.opt['use_rot'])
+
+        img_results = img2tensor(img_results)
+        img_lqs = torch.stack(img_results[0:-1], dim=0)
+        img_gt = img_results[-1]
+
+        # img_lqs: (t, c, h, w)
+        # img_gt: (c, h, w)
+        # key: str
+        return {'lq': img_lqs, 'gt': img_gt, 'key': key}
+
+    def __len__(self):
+        return len(self.keys)
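
The neighbor_list arithmetic above reproduces the frame-list table in the docstring; evaluated for num_frame=5:

    num_frame = 5
    neighbor_list = [i + (9 - num_frame) // 2 for i in range(num_frame)]
    print(neighbor_list)   # [2, 3, 4, 5, 6] -> frames im2.png ... im6.png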
NAFNet/basicsr/demo.py ADDED
@@ -0,0 +1,62 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2022 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from BasicSR (https://github.com/xinntao/BasicSR)
+# Copyright 2018-2020 BasicSR Authors
+# ------------------------------------------------------------------------
+import torch
+
+# from basicsr.data import create_dataloader, create_dataset
+from basicsr.models import create_model
+from basicsr.train import parse_options
+from basicsr.utils import FileClient, imfrombytes, img2tensor, padding, tensor2img, imwrite
+
+# from basicsr.utils import (get_env_info, get_root_logger, get_time_str,
+#                            make_exp_dirs)
+# from basicsr.utils.options import dict2str
+
+def main():
+    # parse options, set distributed setting, set random seed
+    opt = parse_options(is_train=False)
+    opt['num_gpu'] = torch.cuda.device_count()
+
+    img_path = opt['img_path'].get('input_img')
+    output_path = opt['img_path'].get('output_img')
+
+
+    ## 1. read image
+    file_client = FileClient('disk')
+
+    img_bytes = file_client.get(img_path, None)
+    try:
+        img = imfrombytes(img_bytes, float32=True)
+    except Exception:
+        raise Exception("path {} not working".format(img_path))
+
+    img = img2tensor(img, bgr2rgb=True, float32=True)
+
+
+
+    ## 2. run inference
+    opt['dist'] = False
+    model = create_model(opt)
+
+    model.feed_data(data={'lq': img.unsqueeze(dim=0)})
+
+    if model.opt['val'].get('grids', False):
+        model.grids()
+
+    model.test()
+
+    if model.opt['val'].get('grids', False):
+        model.grids_inverse()
+
+    visuals = model.get_current_visuals()
+    sr_img = tensor2img([visuals['result']])
+    imwrite(sr_img, output_path)
+
+    print(f'inference {img_path} .. finished. saved to {output_path}')
+
+if __name__ == '__main__':
+    main()
+
NAFNet/basicsr/demo_ssr.py ADDED
@@ -0,0 +1,119 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2022 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from BasicSR (https://github.com/xinntao/BasicSR)
+# Copyright 2018-2020 BasicSR Authors
+# ------------------------------------------------------------------------
+import torch
+
+# from basicsr.data import create_dataloader, create_dataset
+from basicsr.models import create_model
+from basicsr.utils import FileClient, imfrombytes, img2tensor, padding, tensor2img, imwrite, set_random_seed
+
+import argparse
+from basicsr.utils.options import dict2str, parse
+from basicsr.utils.dist_util import get_dist_info, init_dist
+import random
+
+def parse_options(is_train=True):
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '-opt', type=str, required=True, help='Path to option YAML file.')
+    parser.add_argument(
+        '--launcher',
+        choices=['none', 'pytorch', 'slurm'],
+        default='none',
+        help='job launcher')
+    parser.add_argument('--local_rank', type=int, default=0)
+
+    parser.add_argument('--input_l_path', type=str, required=True, help='The path to the input left image. For stereo image inference only.')
+    parser.add_argument('--input_r_path', type=str, required=True, help='The path to the input right image. For stereo image inference only.')
+    parser.add_argument('--output_l_path', type=str, required=True, help='The path to the output left image. For stereo image inference only.')
+    parser.add_argument('--output_r_path', type=str, required=True, help='The path to the output right image. For stereo image inference only.')
+
+    args = parser.parse_args()
+    opt = parse(args.opt, is_train=is_train)
+
+    # distributed settings
+    if args.launcher == 'none':
+        opt['dist'] = False
+        print('Disable distributed.', flush=True)
+    else:
+        opt['dist'] = True
+        if args.launcher == 'slurm' and 'dist_params' in opt:
+            init_dist(args.launcher, **opt['dist_params'])
+        else:
+            init_dist(args.launcher)
+        print('init dist .. ', args.launcher)
+
+    opt['rank'], opt['world_size'] = get_dist_info()
+
+    # random seed
+    seed = opt.get('manual_seed')
+    if seed is None:
+        seed = random.randint(1, 10000)
+        opt['manual_seed'] = seed
+    set_random_seed(seed + opt['rank'])
+
+    opt['img_path'] = {
+        'input_l': args.input_l_path,
+        'input_r': args.input_r_path,
+        'output_l': args.output_l_path,
+        'output_r': args.output_r_path
+    }
+
+    return opt
+
+def imread(img_path):
+    file_client = FileClient('disk')
+    img_bytes = file_client.get(img_path, None)
+    try:
+        img = imfrombytes(img_bytes, float32=True)
+    except Exception:
+        raise Exception("path {} not working".format(img_path))
+
+    img = img2tensor(img, bgr2rgb=True, float32=True)
+    return img
+
+def main():
+    # parse options, set distributed setting, set random seed
+    opt = parse_options(is_train=False)
+    opt['num_gpu'] = torch.cuda.device_count()
+
+    img_l_path = opt['img_path'].get('input_l')
+    img_r_path = opt['img_path'].get('input_r')
+    output_l_path = opt['img_path'].get('output_l')
+    output_r_path = opt['img_path'].get('output_r')
+
+    ## 1. read image
+    img_l = imread(img_l_path)
+    img_r = imread(img_r_path)
+    img = torch.cat([img_l, img_r], dim=0)
+
+    ## 2. run inference
+    opt['dist'] = False
+    model = create_model(opt)
+
+    model.feed_data(data={'lq': img.unsqueeze(dim=0)})
+
+    if model.opt['val'].get('grids', False):
+        model.grids()
+
+    model.test()
+
+    if model.opt['val'].get('grids', False):
+        model.grids_inverse()
+
+    visuals = model.get_current_visuals()
+    sr_img_l = visuals['result'][:, :3]
+    sr_img_r = visuals['result'][:, 3:]
+    sr_img_l, sr_img_r = tensor2img([sr_img_l, sr_img_r])
+    imwrite(sr_img_l, output_l_path)
+    imwrite(sr_img_r, output_r_path)
+
+    print(f'inference {img_l_path} .. finished. saved to {output_l_path}')
+    print(f'inference {img_r_path} .. finished. saved to {output_r_path}')
+
+if __name__ == '__main__':
+    main()
+
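
The stereo convention used here, isolated as a sketch (the random tensor stands in for the model output; shapes are illustrative):

    import torch

    # left/right RGB frames are stacked on the channel axis: 6 channels in,
    # 6 channels out; the restored pair is recovered by splitting channels
    img_l, img_r = torch.rand(3, 128, 128), torch.rand(3, 128, 128)
    lq = torch.cat([img_l, img_r], dim=0).unsqueeze(0)   # (1, 6, H, W)
    result = torch.rand(1, 6, 128, 128)   # stands in for visuals['result']
    sr_l, sr_r = result[:, :3], result[:, 3:]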
NAFNet/basicsr/metrics/__init__.py ADDED
@@ -0,0 +1,10 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2022 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from BasicSR (https://github.com/xinntao/BasicSR)
+# Copyright 2018-2020 BasicSR Authors
+# ------------------------------------------------------------------------
+from .niqe import calculate_niqe
+from .psnr_ssim import calculate_psnr, calculate_ssim, calculate_ssim_left, calculate_psnr_left, calculate_skimage_ssim, calculate_skimage_ssim_left
+
+__all__ = ['calculate_psnr', 'calculate_ssim', 'calculate_niqe', 'calculate_ssim_left', 'calculate_psnr_left', 'calculate_skimage_ssim', 'calculate_skimage_ssim_left']
NAFNet/basicsr/metrics/fid.py ADDED
@@ -0,0 +1,108 @@
+# ------------------------------------------------------------------------
+# Copyright (c) 2022 megvii-model. All Rights Reserved.
+# ------------------------------------------------------------------------
+# Modified from BasicSR (https://github.com/xinntao/BasicSR)
+# Copyright 2018-2020 BasicSR Authors
+# ------------------------------------------------------------------------
+import numpy as np
+import torch
+import torch.nn as nn
+from scipy import linalg
+from tqdm import tqdm
+
+from basicsr.models.archs.inception import InceptionV3
+
+
+def load_patched_inception_v3(device='cuda',
+                              resize_input=True,
+                              normalize_input=False):
+    # we may not resize the input, but in [rosinality/stylegan2-pytorch] it
+    # does resize the input.
+    inception = InceptionV3([3],
+                            resize_input=resize_input,
+                            normalize_input=normalize_input)
+    inception = nn.DataParallel(inception).eval().to(device)
+    return inception
+
+
+@torch.no_grad()
+def extract_inception_features(data_generator,
+                               inception,
+                               len_generator=None,
+                               device='cuda'):
+    """Extract inception features.
+
+    Args:
+        data_generator (generator): A data generator.
+        inception (nn.Module): Inception model.
+        len_generator (int): Length of the data_generator to show the
+            progressbar. Default: None.
+        device (str): Device. Default: cuda.
+
+    Returns:
+        Tensor: Extracted features.
+    """
+    if len_generator is not None:
+        pbar = tqdm(total=len_generator, unit='batch', desc='Extract')
+    else:
+        pbar = None
+    features = []
+
+    for data in data_generator:
+        if pbar:
+            pbar.update(1)
+        data = data.to(device)
+        feature = inception(data)[0].view(data.shape[0], -1)
+        features.append(feature.to('cpu'))
+    if pbar:
+        pbar.close()
+    features = torch.cat(features, 0)
+    return features
+
+
+def calculate_fid(mu1, sigma1, mu2, sigma2, eps=1e-6):
+    """Numpy implementation of the Frechet Distance.
+
+    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
+    and X_2 ~ N(mu_2, C_2) is
+    d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
+    Stable version by Dougal J. Sutherland.
+
+    Args:
+        mu1 (np.array): The sample mean over activations.
+        sigma1 (np.array): The covariance matrix over activations for
+            generated samples.
+        mu2 (np.array): The sample mean over activations, precalculated on a
+            representative data set.
+        sigma2 (np.array): The covariance matrix over activations,
+            precalculated on a representative data set.
+
+    Returns:
+        float: The Frechet Distance.
+    """
+    assert mu1.shape == mu2.shape, 'Two mean vectors have different lengths'
+    assert sigma1.shape == sigma2.shape, (
+        'Two covariances have different dimensions')
+
+    cov_sqrt, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
+
+    # Product might be almost singular
+    if not np.isfinite(cov_sqrt).all():
+        print(f'Product of cov matrices is singular. Adding {eps} to diagonal '
+              'of cov estimates')
+        offset = np.eye(sigma1.shape[0]) * eps
+        cov_sqrt = linalg.sqrtm((sigma1 + offset) @ (sigma2 + offset))
+
+    # Numerical error might give slight imaginary component
+    if np.iscomplexobj(cov_sqrt):
+        if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=1e-3):
+            m = np.max(np.abs(cov_sqrt.imag))
+            raise ValueError(f'Imaginary component {m}')
+        cov_sqrt = cov_sqrt.real
+
+    mean_diff = mu1 - mu2
+    mean_norm = mean_diff @ mean_diff
+    trace = np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(cov_sqrt)
+    fid = mean_norm + trace
+
+    return fid
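
A toy check of calculate_fid on synthetic activation statistics; the 64-dimensional features are an assumption for brevity (real FID uses 2048-dimensional InceptionV3 features from extract_inception_features):

    import numpy as np
    from basicsr.metrics.fid import calculate_fid

    rng = np.random.default_rng(0)
    feat_a = rng.normal(size=(1000, 64))
    feat_b = rng.normal(loc=0.1, size=(1000, 64))

    mu1, sigma1 = feat_a.mean(axis=0), np.cov(feat_a, rowvar=False)
    mu2, sigma2 = feat_b.mean(axis=0), np.cov(feat_b, rowvar=False)
    print(calculate_fid(mu1, sigma1, mu2, sigma2))   # small positive number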
NAFNet/basicsr/metrics/metric_util.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
5
+ # Copyright 2018-2020 BasicSR Authors
6
+ # ------------------------------------------------------------------------
7
+ import numpy as np
8
+
9
+ from basicsr.utils.matlab_functions import bgr2ycbcr
10
+
11
+
12
+ def reorder_image(img, input_order='HWC'):
13
+ """Reorder images to 'HWC' order.
14
+
15
+ If the input_order is (h, w), return (h, w, 1);
16
+ If the input_order is (c, h, w), return (h, w, c);
17
+ If the input_order is (h, w, c), return as it is.
18
+
19
+ Args:
20
+ img (ndarray): Input image.
21
+ input_order (str): Whether the input order is 'HWC' or 'CHW'.
22
+ If the input image shape is (h, w), input_order will not have
23
+ effects. Default: 'HWC'.
24
+
25
+ Returns:
26
+ ndarray: reordered image.
27
+ """
28
+
29
+ if input_order not in ['HWC', 'CHW']:
30
+ raise ValueError(
31
+ f'Wrong input_order {input_order}. Supported input_orders are '
32
+ "'HWC' and 'CHW'")
33
+ if len(img.shape) == 2:
34
+ img = img[..., None]
35
+ if input_order == 'CHW':
36
+ img = img.transpose(1, 2, 0)
37
+ return img
38
+
39
+
40
+ def to_y_channel(img):
41
+ """Change to Y channel of YCbCr.
42
+
43
+ Args:
44
+ img (ndarray): Images with range [0, 255].
45
+
46
+ Returns:
47
+ (ndarray): Images with range [0, 255] (float type) without round.
48
+ """
49
+ img = img.astype(np.float32) / 255.
50
+ if img.ndim == 3 and img.shape[2] == 3:
51
+ img = bgr2ycbcr(img, y_only=True)
52
+ img = img[..., None]
53
+ return img * 255.
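
A quick sketch of how these two helpers compose (hypothetical data, not part of the commit): reorder a CHW image to HWC, then reduce it to the Y channel that the Y-channel metric paths expect:

import numpy as np
from basicsr.metrics.metric_util import reorder_image, to_y_channel

img_chw = np.random.randint(0, 256, (3, 64, 64)).astype(np.float32)
img_hwc = reorder_image(img_chw, input_order='CHW')  # -> (64, 64, 3)
img_y = to_y_channel(img_hwc)                        # -> (64, 64, 1), float in [0, 255]
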
NAFNet/basicsr/metrics/niqe.py ADDED
@@ -0,0 +1,211 @@
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
5
+ # Copyright 2018-2020 BasicSR Authors
6
+ # ------------------------------------------------------------------------
7
+ import cv2
8
+ import math
9
+ import numpy as np
10
+ from scipy.ndimage.filters import convolve
11
+ from scipy.special import gamma
12
+
13
+ from basicsr.metrics.metric_util import reorder_image, to_y_channel
14
+
15
+
16
+ def estimate_aggd_param(block):
17
+ """Estimate AGGD (Asymmetric Generalized Gaussian Distribution) paramters.
18
+
19
+ Args:
20
+ block (ndarray): 2D Image block.
21
+
22
+ Returns:
23
+ tuple: alpha (float), beta_l (float) and beta_r (float) for the AGGD
24
+ distribution (estimating the parameters in Equation 7 of the paper).
25
+ """
26
+ block = block.flatten()
27
+ gam = np.arange(0.2, 10.001, 0.001) # len = 9801
28
+ gam_reciprocal = np.reciprocal(gam)
29
+ r_gam = np.square(gamma(gam_reciprocal * 2)) / (
30
+ gamma(gam_reciprocal) * gamma(gam_reciprocal * 3))
31
+
32
+ left_std = np.sqrt(np.mean(block[block < 0]**2))
33
+ right_std = np.sqrt(np.mean(block[block > 0]**2))
34
+ gammahat = left_std / right_std
35
+ rhat = (np.mean(np.abs(block)))**2 / np.mean(block**2)
36
+ rhatnorm = (rhat * (gammahat**3 + 1) *
37
+ (gammahat + 1)) / ((gammahat**2 + 1)**2)
38
+ array_position = np.argmin((r_gam - rhatnorm)**2)
39
+
40
+ alpha = gam[array_position]
41
+ beta_l = left_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha))
42
+ beta_r = right_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha))
43
+ return (alpha, beta_l, beta_r)
44
+
45
+
46
+ def compute_feature(block):
47
+ """Compute features.
48
+
49
+ Args:
50
+ block (ndarray): 2D Image block.
51
+
52
+ Returns:
53
+ list: Features with length of 18.
54
+ """
55
+ feat = []
56
+ alpha, beta_l, beta_r = estimate_aggd_param(block)
57
+ feat.extend([alpha, (beta_l + beta_r) / 2])
58
+
59
+ # distortions disturb the fairly regular structure of natural images.
60
+ # This deviation can be captured by analyzing the sample distribution of
61
+ # the products of pairs of adjacent coefficients computed along
62
+ # horizontal, vertical and diagonal orientations.
63
+ shifts = [[0, 1], [1, 0], [1, 1], [1, -1]]
64
+ for i in range(len(shifts)):
65
+ shifted_block = np.roll(block, shifts[i], axis=(0, 1))
66
+ alpha, beta_l, beta_r = estimate_aggd_param(block * shifted_block)
67
+ # Eq. 8
68
+ mean = (beta_r - beta_l) * (gamma(2 / alpha) / gamma(1 / alpha))
69
+ feat.extend([alpha, mean, beta_l, beta_r])
70
+ return feat
71
+
72
+
73
+ def niqe(img,
74
+ mu_pris_param,
75
+ cov_pris_param,
76
+ gaussian_window,
77
+ block_size_h=96,
78
+ block_size_w=96):
79
+ """Calculate NIQE (Natural Image Quality Evaluator) metric.
80
+
81
+ Ref: Making a "Completely Blind" Image Quality Analyzer.
82
+ This implementation could produce almost the same results as the official
83
+ MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
84
+
85
+ Note that we do not include block overlap height and width, since they are
86
+ always 0 in the official implementation.
87
+
88
+ For good performance, the official implementation advises dividing the
89
+ distorted image into patches of the same size as those used to
90
+ construct the multivariate Gaussian model.
91
+
92
+ Args:
93
+ img (ndarray): Input image whose quality needs to be computed. The
94
+ image must be a gray or Y (of YCbCr) image with shape (h, w).
95
+ Range [0, 255] with float type.
96
+ mu_pris_param (ndarray): Mean of a pre-defined multivariate Gaussian
97
+ model calculated on the pristine dataset.
98
+ cov_pris_param (ndarray): Covariance of a pre-defined multivariate
99
+ Gaussian model calculated on the pristine dataset.
100
+ gaussian_window (ndarray): A 7x7 Gaussian window used for smoothing the
101
+ image.
102
+ block_size_h (int): Height of the blocks into which the image is divided.
103
+ Default: 96 (the official recommended value).
104
+ block_size_w (int): Width of the blocks into which the image is divided.
105
+ Default: 96 (the official recommended value).
106
+ """
107
+ assert img.ndim == 2, (
108
+ 'Input image must be a gray or Y (of YCbCr) image with shape (h, w).')
109
+ # crop image
110
+ h, w = img.shape
111
+ num_block_h = math.floor(h / block_size_h)
112
+ num_block_w = math.floor(w / block_size_w)
113
+ img = img[0:num_block_h * block_size_h, 0:num_block_w * block_size_w]
114
+
115
+ distparam = [] # dist param is actually the multiscale features
116
+ for scale in (1, 2): # perform on two scales (1, 2)
117
+ mu = convolve(img, gaussian_window, mode='nearest')
118
+ sigma = np.sqrt(
119
+ np.abs(
120
+ convolve(np.square(img), gaussian_window, mode='nearest') -
121
+ np.square(mu)))
122
+ # normalize, as in Eq. 1 in the paper
123
+ img_normalized = (img - mu) / (sigma + 1)
124
+
125
+ feat = []
126
+ for idx_w in range(num_block_w):
127
+ for idx_h in range(num_block_h):
128
+ # process each block
129
+ block = img_normalized[idx_h * block_size_h //
130
+ scale:(idx_h + 1) * block_size_h //
131
+ scale, idx_w * block_size_w //
132
+ scale:(idx_w + 1) * block_size_w //
133
+ scale]
134
+ feat.append(compute_feature(block))
135
+
136
+ distparam.append(np.array(feat))
137
+ # TODO: matlab bicubic downsample with anti-aliasing
138
+ # for simplicity, now we use opencv instead, which will result in
139
+ # a slight difference.
140
+ if scale == 1:
141
+ h, w = img.shape
142
+ img = cv2.resize(
143
+ img / 255., (w // 2, h // 2), interpolation=cv2.INTER_LINEAR)
144
+ img = img * 255.
145
+
146
+ distparam = np.concatenate(distparam, axis=1)
147
+
148
+ # fit a MVG (multivariate Gaussian) model to distorted patch features
149
+ mu_distparam = np.nanmean(distparam, axis=0)
150
+ # use nancov. ref: https://ww2.mathworks.cn/help/stats/nancov.html
151
+ distparam_no_nan = distparam[~np.isnan(distparam).any(axis=1)]
152
+ cov_distparam = np.cov(distparam_no_nan, rowvar=False)
153
+
154
+ # compute niqe quality, Eq. 10 in the paper
155
+ invcov_param = np.linalg.pinv((cov_pris_param + cov_distparam) / 2)
156
+ quality = np.matmul(
157
+ np.matmul((mu_pris_param - mu_distparam), invcov_param),
158
+ np.transpose((mu_pris_param - mu_distparam)))
159
+ quality = np.sqrt(quality)
160
+
161
+ return quality
162
+
163
+
164
+ def calculate_niqe(img, crop_border, input_order='HWC', convert_to='y'):
165
+ """Calculate NIQE (Natural Image Quality Evaluator) metric.
166
+
167
+ Ref: Making a "Completely Blind" Image Quality Analyzer.
168
+ This implementation could produce almost the same results as the official
169
+ MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
170
+
171
+ We use the official params estimated from the pristine dataset.
172
+ We use the recommended block size (96, 96) without overlaps.
173
+
174
+ Args:
175
+ img (ndarray): Input image whose quality needs to be computed.
176
+ The input image must be in range [0, 255] with float/int type.
177
+ The image order can be 'HW', 'HWC' or 'CHW' (BGR channel order).
178
+ If the input order is 'HWC' or 'CHW', it will be converted to gray
179
+ or Y (of YCbCr) image according to the ``convert_to`` argument.
180
+ crop_border (int): Cropped pixels in each edge of an image. These
181
+ pixels are not involved in the metric calculation.
182
+ input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'.
183
+ Default: 'HWC'.
184
+ convert_to (str): Whether converted to 'y' (of MATLAB YCbCr) or 'gray'.
185
+ Default: 'y'.
186
+
187
+ Returns:
188
+ float: NIQE result.
189
+ """
190
+
191
+ # we use the official params estimated from the pristine dataset.
192
+ niqe_pris_params = np.load('basicsr/metrics/niqe_pris_params.npz')
193
+ mu_pris_param = niqe_pris_params['mu_pris_param']
194
+ cov_pris_param = niqe_pris_params['cov_pris_param']
195
+ gaussian_window = niqe_pris_params['gaussian_window']
196
+
197
+ img = img.astype(np.float32)
198
+ if input_order != 'HW':
199
+ img = reorder_image(img, input_order=input_order)
200
+ if convert_to == 'y':
201
+ img = to_y_channel(img)
202
+ elif convert_to == 'gray':
203
+ img = cv2.cvtColor(img / 255., cv2.COLOR_BGR2GRAY) * 255.
204
+ img = np.squeeze(img)
205
+
206
+ if crop_border != 0:
207
+ img = img[crop_border:-crop_border, crop_border:-crop_border]
208
+
209
+ niqe_result = niqe(img, mu_pris_param, cov_pris_param, gaussian_window)
210
+
211
+ return niqe_result
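
A minimal usage sketch (the image path is hypothetical). Note that calculate_niqe loads 'basicsr/metrics/niqe_pris_params.npz' via a relative path, so this must be run from the repository root:

import cv2
from basicsr.metrics.niqe import calculate_niqe

img = cv2.imread('datasets/example.png')  # BGR uint8; hypothetical path
score = calculate_niqe(img, crop_border=0, input_order='HWC', convert_to='y')
print(score)  # lower NIQE indicates better perceptual quality
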
NAFNet/basicsr/metrics/niqe_pris_params.npz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a7c182a68c9e7f1b2e2e5ec723279d6f65d912b6fcaf37eb2bf03d7367c4296
3
+ size 11850
NAFNet/basicsr/metrics/psnr_ssim.py ADDED
@@ -0,0 +1,358 @@
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+ # modified from https://github.com/mayorx/matlab_ssim_pytorch_implementation/blob/main/calc_ssim.py
5
+ # ------------------------------------------------------------------------
6
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
7
+ # Copyright 2018-2020 BasicSR Authors
8
+ # ------------------------------------------------------------------------
9
+ import cv2
10
+ import numpy as np
11
+
12
+ from basicsr.metrics.metric_util import reorder_image, to_y_channel
13
+ from skimage.metrics import structural_similarity
14
+ import torch
15
+
16
+ def calculate_psnr(img1,
17
+ img2,
18
+ crop_border,
19
+ input_order='HWC',
20
+ test_y_channel=False):
21
+ """Calculate PSNR (Peak Signal-to-Noise Ratio).
22
+
23
+ Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
24
+
25
+ Args:
26
+ img1 (ndarray/tensor): Images with range [0, 255]/[0, 1].
27
+ img2 (ndarray/tensor): Images with range [0, 255]/[0, 1].
28
+ crop_border (int): Cropped pixels in each edge of an image. These
29
+ pixels are not involved in the PSNR calculation.
30
+ input_order (str): Whether the input order is 'HWC' or 'CHW'.
31
+ Default: 'HWC'.
32
+ test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
33
+
34
+ Returns:
35
+ float: psnr result.
36
+ """
37
+
38
+ assert img1.shape == img2.shape, (
39
+ f'Image shapes are different: {img1.shape}, {img2.shape}.')
40
+ if input_order not in ['HWC', 'CHW']:
41
+ raise ValueError(
42
+ f'Wrong input_order {input_order}. Supported input_orders are '
43
+ '"HWC" and "CHW"')
44
+ if type(img1) == torch.Tensor:
45
+ if len(img1.shape) == 4:
46
+ img1 = img1.squeeze(0)
47
+ img1 = img1.detach().cpu().numpy().transpose(1,2,0)
48
+ if type(img2) == torch.Tensor:
49
+ if len(img2.shape) == 4:
50
+ img2 = img2.squeeze(0)
51
+ img2 = img2.detach().cpu().numpy().transpose(1,2,0)
52
+
53
+ img1 = reorder_image(img1, input_order=input_order)
54
+ img2 = reorder_image(img2, input_order=input_order)
55
+ img1 = img1.astype(np.float64)
56
+ img2 = img2.astype(np.float64)
57
+
58
+ if crop_border != 0:
59
+ img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
60
+ img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
61
+
62
+ def _psnr(img1, img2):
63
+ if test_y_channel:
64
+ img1 = to_y_channel(img1)
65
+ img2 = to_y_channel(img2)
66
+
67
+ mse = np.mean((img1 - img2)**2)
68
+ if mse == 0:
69
+ return float('inf')
70
+ max_value = 1. if img1.max() <= 1 else 255.
71
+ return 20. * np.log10(max_value / np.sqrt(mse))
72
+
73
+ if img1.ndim == 3 and img1.shape[2] == 6:
74
+ l1, r1 = img1[:,:,:3], img1[:,:,3:]
75
+ l2, r2 = img2[:,:,:3], img2[:,:,3:]
76
+ return (_psnr(l1, l2) + _psnr(r1, r2))/2
77
+ else:
78
+ return _psnr(img1, img2)
79
+
80
+ def calculate_psnr_left(img1,
81
+ img2,
82
+ crop_border,
83
+ input_order='HWC',
84
+ test_y_channel=False):
85
+ assert input_order == 'HWC'
86
+ assert crop_border == 0
87
+
88
+ img1 = img1[:,64:,:3]
89
+ img2 = img2[:,64:,:3]
90
+ return calculate_psnr(img1=img1, img2=img2, crop_border=0, input_order=input_order, test_y_channel=test_y_channel)
91
+
92
+ def _ssim(img1, img2, max_value):
93
+ """Calculate SSIM (structural similarity) for one channel images.
94
+
95
+ It is called by func:`calculate_ssim`.
96
+
97
+ Args:
98
+ img1 (ndarray): Images with range [0, 255] with order 'HWC'.
99
+ img2 (ndarray): Images with range [0, 255] with order 'HWC'.
100
+
101
+ Returns:
102
+ float: ssim result.
103
+ """
104
+
105
+ C1 = (0.01 * max_value)**2
106
+ C2 = (0.03 * max_value)**2
107
+
108
+ img1 = img1.astype(np.float64)
109
+ img2 = img2.astype(np.float64)
110
+ kernel = cv2.getGaussianKernel(11, 1.5)
111
+ window = np.outer(kernel, kernel.transpose())
112
+
113
+ mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
114
+ mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
115
+ mu1_sq = mu1**2
116
+ mu2_sq = mu2**2
117
+ mu1_mu2 = mu1 * mu2
118
+ sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
119
+ sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
120
+ sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
121
+
122
+ ssim_map = ((2 * mu1_mu2 + C1) *
123
+ (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
124
+ (sigma1_sq + sigma2_sq + C2))
125
+ return ssim_map.mean()
126
+
127
+ def prepare_for_ssim(img, k):
128
+ import torch
129
+ with torch.no_grad():
130
+ img = torch.from_numpy(img).unsqueeze(0).unsqueeze(0).float()
131
+ conv = torch.nn.Conv2d(1, 1, k, stride=1, padding=k//2, padding_mode='reflect')
132
+ conv.weight.requires_grad = False
133
+ conv.weight[:, :, :, :] = 1. / (k * k)
134
+
135
+ img = conv(img)
136
+
137
+ img = img.squeeze(0).squeeze(0)
138
+ img = img[0::k, 0::k]
139
+ return img.detach().cpu().numpy()
140
+
141
+ def prepare_for_ssim_rgb(img, k):
142
+ import torch
143
+ with torch.no_grad():
144
+ img = torch.from_numpy(img).float() #HxWx3
145
+
146
+ conv = torch.nn.Conv2d(1, 1, k, stride=1, padding=k // 2, padding_mode='reflect')
147
+ conv.weight.requires_grad = False
148
+ conv.weight[:, :, :, :] = 1. / (k * k)
149
+
150
+ new_img = []
151
+
152
+ for i in range(3):
153
+ new_img.append(conv(img[:, :, i].unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0)[0::k, 0::k])
154
+
155
+ return torch.stack(new_img, dim=2).detach().cpu().numpy()
156
+
157
+ def _3d_gaussian_calculator(img, conv3d):
158
+ out = conv3d(img.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0)
159
+ return out
160
+
161
+ def _generate_3d_gaussian_kernel():
162
+ kernel = cv2.getGaussianKernel(11, 1.5)
163
+ window = np.outer(kernel, kernel.transpose())
164
+ kernel_3 = cv2.getGaussianKernel(11, 1.5)
165
+ kernel = torch.tensor(np.stack([window * k for k in kernel_3], axis=0))
166
+ conv3d = torch.nn.Conv3d(1, 1, (11, 11, 11), stride=1, padding=(5, 5, 5), bias=False, padding_mode='replicate')
167
+ conv3d.weight.requires_grad = False
168
+ conv3d.weight[0, 0, :, :, :] = kernel
169
+ return conv3d
170
+
171
+ def _ssim_3d(img1, img2, max_value):
172
+ assert len(img1.shape) == 3 and len(img2.shape) == 3
173
+ """Calculate SSIM (structural similarity) for one channel images.
174
+
175
+ It is called by func:`calculate_ssim`.
176
+
177
+ Args:
178
+ img1 (ndarray): Images with range [0, 255]/[0, 1] with order 'HWC'.
179
+ img2 (ndarray): Images with range [0, 255]/[0, 1] with order 'HWC'.
180
+
181
+ Returns:
182
+ float: ssim result.
183
+ """
184
+ C1 = (0.01 * max_value) ** 2
185
+ C2 = (0.03 * max_value) ** 2
186
+ img1 = img1.astype(np.float64)
187
+ img2 = img2.astype(np.float64)
188
+
189
+ kernel = _generate_3d_gaussian_kernel().cuda()
190
+
191
+ img1 = torch.tensor(img1).float().cuda()
192
+ img2 = torch.tensor(img2).float().cuda()
193
+
194
+
195
+ mu1 = _3d_gaussian_calculator(img1, kernel)
196
+ mu2 = _3d_gaussian_calculator(img2, kernel)
197
+
198
+ mu1_sq = mu1 ** 2
199
+ mu2_sq = mu2 ** 2
200
+ mu1_mu2 = mu1 * mu2
201
+ sigma1_sq = _3d_gaussian_calculator(img1 ** 2, kernel) - mu1_sq
202
+ sigma2_sq = _3d_gaussian_calculator(img2 ** 2, kernel) - mu2_sq
203
+ sigma12 = _3d_gaussian_calculator(img1*img2, kernel) - mu1_mu2
204
+
205
+ ssim_map = ((2 * mu1_mu2 + C1) *
206
+ (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
207
+ (sigma1_sq + sigma2_sq + C2))
208
+ return float(ssim_map.mean())
209
+
210
+ def _ssim_cly(img1, img2):
211
+ assert len(img1.shape) == 2 and len(img2.shape) == 2
212
+ """Calculate SSIM (structural similarity) for one channel images.
213
+
214
+ It is called by func:`calculate_ssim`.
215
+
216
+ Args:
217
+ img1 (ndarray): Images with range [0, 255] with order 'HWC'.
218
+ img2 (ndarray): Images with range [0, 255] with order 'HWC'.
219
+
220
+ Returns:
221
+ float: ssim result.
222
+ """
223
+
224
+ C1 = (0.01 * 255)**2
225
+ C2 = (0.03 * 255)**2
226
+ img1 = img1.astype(np.float64)
227
+ img2 = img2.astype(np.float64)
228
+
229
+ kernel = cv2.getGaussianKernel(11, 1.5)
230
+ # print(kernel)
231
+ window = np.outer(kernel, kernel.transpose())
232
+
233
+ bt = cv2.BORDER_REPLICATE
234
+
235
+ mu1 = cv2.filter2D(img1, -1, window, borderType=bt)
236
+ mu2 = cv2.filter2D(img2, -1, window, borderType=bt)
237
+
238
+ mu1_sq = mu1**2
239
+ mu2_sq = mu2**2
240
+ mu1_mu2 = mu1 * mu2
241
+ sigma1_sq = cv2.filter2D(img1**2, -1, window, borderType=bt) - mu1_sq
242
+ sigma2_sq = cv2.filter2D(img2**2, -1, window, borderType=bt) - mu2_sq
243
+ sigma12 = cv2.filter2D(img1 * img2, -1, window, borderType=bt) - mu1_mu2
244
+
245
+ ssim_map = ((2 * mu1_mu2 + C1) *
246
+ (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
247
+ (sigma1_sq + sigma2_sq + C2))
248
+ return ssim_map.mean()
249
+
250
+
251
+ def calculate_ssim(img1,
252
+ img2,
253
+ crop_border,
254
+ input_order='HWC',
255
+ test_y_channel=False,
256
+ ssim3d=True):
257
+ """Calculate SSIM (structural similarity).
258
+
259
+ Ref:
260
+ Image quality assessment: From error visibility to structural similarity
261
+
262
+ The results are the same as that of the official released MATLAB code in
263
+ https://ece.uwaterloo.ca/~z70wang/research/ssim/.
264
+
265
+ For three-channel images, SSIM is calculated for each channel and then
266
+ averaged.
267
+
268
+ Args:
269
+ img1 (ndarray): Images with range [0, 255].
270
+ img2 (ndarray): Images with range [0, 255].
271
+ crop_border (int): Cropped pixels in each edge of an image. These
272
+ pixels are not involved in the SSIM calculation.
273
+ input_order (str): Whether the input order is 'HWC' or 'CHW'.
274
+ Default: 'HWC'.
275
+ test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
276
+
277
+ Returns:
278
+ float: ssim result.
279
+ """
280
+
281
+ assert img1.shape == img2.shape, (
282
+ f'Image shapes are different: {img1.shape}, {img2.shape}.')
283
+ if input_order not in ['HWC', 'CHW']:
284
+ raise ValueError(
285
+ f'Wrong input_order {input_order}. Supported input_orders are '
286
+ '"HWC" and "CHW"')
287
+
288
+ if type(img1) == torch.Tensor:
289
+ if len(img1.shape) == 4:
290
+ img1 = img1.squeeze(0)
291
+ img1 = img1.detach().cpu().numpy().transpose(1,2,0)
292
+ if type(img2) == torch.Tensor:
293
+ if len(img2.shape) == 4:
294
+ img2 = img2.squeeze(0)
295
+ img2 = img2.detach().cpu().numpy().transpose(1,2,0)
296
+
297
+ img1 = reorder_image(img1, input_order=input_order)
298
+ img2 = reorder_image(img2, input_order=input_order)
299
+
300
+ img1 = img1.astype(np.float64)
301
+ img2 = img2.astype(np.float64)
302
+
303
+ if crop_border != 0:
304
+ img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
305
+ img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
306
+
307
+ def _cal_ssim(img1, img2):
308
+ if test_y_channel:
309
+ img1 = to_y_channel(img1)
310
+ img2 = to_y_channel(img2)
311
+ return _ssim_cly(img1[..., 0], img2[..., 0])
312
+
313
+ ssims = []
314
+ # ssims_before = []
315
+
316
+ # skimage_before = skimage.metrics.structural_similarity(img1, img2, data_range=255., multichannel=True)
317
+ # print('.._skimage',
318
+ # skimage.metrics.structural_similarity(img1, img2, data_range=255., multichannel=True))
319
+ max_value = 1 if img1.max() <= 1 else 255
320
+ with torch.no_grad():
321
+ final_ssim = _ssim_3d(img1, img2, max_value) if ssim3d else _ssim(img1, img2, max_value)
322
+ ssims.append(final_ssim)
323
+
324
+ # for i in range(img1.shape[2]):
325
+ # ssims_before.append(_ssim(img1, img2))
326
+
327
+ # print('..ssim mean , new {:.4f} and before {:.4f} .... skimage before {:.4f}'.format(np.array(ssims).mean(), np.array(ssims_before).mean(), skimage_before))
328
+ # ssims.append(skimage.metrics.structural_similarity(img1[..., i], img2[..., i], multichannel=False))
329
+
330
+ return np.array(ssims).mean()
331
+
332
+ if img1.ndim == 3 and img1.shape[2] == 6:
333
+ l1, r1 = img1[:,:,:3], img1[:,:,3:]
334
+ l2, r2 = img2[:,:,:3], img2[:,:,3:]
335
+ return (_cal_ssim(l1, l2) + _cal_ssim(r1, r2))/2
336
+ else:
337
+ return _cal_ssim(img1, img2)
338
+
339
+ def calculate_ssim_left(img1,
340
+ img2,
341
+ crop_border,
342
+ input_order='HWC',
343
+ test_y_channel=False,
344
+ ssim3d=True):
345
+ assert input_order == 'HWC'
346
+ assert crop_border == 0
347
+
348
+ img1 = img1[:,64:,:3]
349
+ img2 = img2[:,64:,:3]
350
+ return calculate_ssim(img1=img1, img2=img2, crop_border=0, input_order=input_order, test_y_channel=test_y_channel, ssim3d=ssim3d)
351
+
352
+ def calculate_skimage_ssim(img1, img2):
353
+ return structural_similarity(img1, img2, multichannel=True)
354
+
355
+ def calculate_skimage_ssim_left(img1, img2):
356
+ img1 = img1[:,64:,:3]
357
+ img2 = img2[:,64:,:3]
358
+ return calculate_skimage_ssim(img1=img1, img2=img2)
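
A minimal paired-evaluation sketch (file paths hypothetical). One caveat visible above: the default ssim3d=True path builds its 3D Gaussian kernel with .cuda(), so pass ssim3d=False on a CPU-only machine:

import cv2
from basicsr.metrics.psnr_ssim import calculate_psnr, calculate_ssim

gt = cv2.imread('gt.png')         # hypothetical ground-truth image
out = cv2.imread('restored.png')  # hypothetical restored image

psnr = calculate_psnr(gt, out, crop_border=0, input_order='HWC')
ssim = calculate_ssim(gt, out, crop_border=0, input_order='HWC', ssim3d=False)
print(psnr, ssim)
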
NAFNet/basicsr/models/__init__.py ADDED
@@ -0,0 +1,48 @@
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
5
+ # Copyright 2018-2020 BasicSR Authors
6
+ # ------------------------------------------------------------------------
7
+ import importlib
8
+ from os import path as osp
9
+
10
+ from basicsr.utils import get_root_logger, scandir
11
+
12
+ # automatically scan and import model modules
13
+ # scan all the files under the 'models' folder and collect files ending with
14
+ # '_model.py'
15
+ model_folder = osp.dirname(osp.abspath(__file__))
16
+ model_filenames = [
17
+ osp.splitext(osp.basename(v))[0] for v in scandir(model_folder)
18
+ if v.endswith('_model.py')
19
+ ]
20
+ # import all the model modules
21
+ _model_modules = [
22
+ importlib.import_module(f'basicsr.models.{file_name}')
23
+ for file_name in model_filenames
24
+ ]
25
+
26
+
27
+ def create_model(opt):
28
+ """Create model.
29
+
30
+ Args:
31
+ opt (dict): Configuration. It contains:
32
+ model_type (str): Model type.
33
+ """
34
+ model_type = opt['model_type']
35
+
36
+ # dynamic instantiation
37
+ for module in _model_modules:
38
+ model_cls = getattr(module, model_type, None)
39
+ if model_cls is not None:
40
+ break
41
+ if model_cls is None:
42
+ raise ValueError(f'Model {model_type} is not found.')
43
+
44
+ model = model_cls(opt)
45
+
46
+ logger = get_root_logger()
47
+ logger.info(f'Model [{model.__class__.__name__}] is created.')
48
+ return model
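
A sketch of the lookup this registry performs: create_model scans every *_model.py module for a class named opt['model_type'], e.g. ImageRestorationModel from basicsr/models/image_restoration_model.py in this upload. The remaining option keys depend on that model's constructor and normally come from a parsed YAML config, so they are elided here:

from basicsr.models import create_model

opt = {
    'model_type': 'ImageRestorationModel',
    # ... remaining keys from the parsed YAML options file ...
}
model = create_model(opt)
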
NAFNet/basicsr/models/archs/Baseline_arch.py ADDED
@@ -0,0 +1,202 @@
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+
5
+ '''
6
+ Simple Baselines for Image Restoration
7
+
8
+ @article{chen2022simple,
9
+ title={Simple Baselines for Image Restoration},
10
+ author={Chen, Liangyu and Chu, Xiaojie and Zhang, Xiangyu and Sun, Jian},
11
+ journal={arXiv preprint arXiv:2204.04676},
12
+ year={2022}
13
+ }
14
+ '''
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+ import torch.nn.functional as F
19
+ from basicsr.models.archs.arch_util import LayerNorm2d
20
+ from basicsr.models.archs.local_arch import Local_Base
21
+
22
+ class BaselineBlock(nn.Module):
23
+ def __init__(self, c, DW_Expand=1, FFN_Expand=2, drop_out_rate=0.):
24
+ super().__init__()
25
+ dw_channel = c * DW_Expand
26
+ self.conv1 = nn.Conv2d(in_channels=c, out_channels=dw_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
27
+ self.conv2 = nn.Conv2d(in_channels=dw_channel, out_channels=dw_channel, kernel_size=3, padding=1, stride=1, groups=dw_channel,
28
+ bias=True)
29
+ self.conv3 = nn.Conv2d(in_channels=dw_channel, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
30
+
31
+ # Channel Attention
32
+ self.se = nn.Sequential(
33
+ nn.AdaptiveAvgPool2d(1),
34
+ nn.Conv2d(in_channels=dw_channel, out_channels=dw_channel // 2, kernel_size=1, padding=0, stride=1,
35
+ groups=1, bias=True),
36
+ nn.ReLU(inplace=True),
37
+ nn.Conv2d(in_channels=dw_channel // 2, out_channels=dw_channel, kernel_size=1, padding=0, stride=1,
38
+ groups=1, bias=True),
39
+ nn.Sigmoid()
40
+ )
41
+
42
+ # GELU
43
+ self.gelu = nn.GELU()
44
+
45
+ ffn_channel = FFN_Expand * c
46
+ self.conv4 = nn.Conv2d(in_channels=c, out_channels=ffn_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
47
+ self.conv5 = nn.Conv2d(in_channels=ffn_channel, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
48
+
49
+ self.norm1 = LayerNorm2d(c)
50
+ self.norm2 = LayerNorm2d(c)
51
+
52
+ self.dropout1 = nn.Dropout(drop_out_rate) if drop_out_rate > 0. else nn.Identity()
53
+ self.dropout2 = nn.Dropout(drop_out_rate) if drop_out_rate > 0. else nn.Identity()
54
+
55
+ self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
56
+ self.gamma = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
57
+
58
+ def forward(self, inp):
59
+ x = inp
60
+
61
+ x = self.norm1(x)
62
+
63
+ x = self.conv1(x)
64
+ x = self.conv2(x)
65
+ x = self.gelu(x)
66
+ x = x * self.se(x)
67
+ x = self.conv3(x)
68
+
69
+ x = self.dropout1(x)
70
+
71
+ y = inp + x * self.beta
72
+
73
+ x = self.conv4(self.norm2(y))
74
+ x = self.gelu(x)
75
+ x = self.conv5(x)
76
+
77
+ x = self.dropout2(x)
78
+
79
+ return y + x * self.gamma
80
+
81
+
82
+ class Baseline(nn.Module):
83
+
84
+ def __init__(self, img_channel=3, width=16, middle_blk_num=1, enc_blk_nums=[], dec_blk_nums=[], dw_expand=1, ffn_expand=2):
85
+ super().__init__()
86
+
87
+ self.intro = nn.Conv2d(in_channels=img_channel, out_channels=width, kernel_size=3, padding=1, stride=1, groups=1,
88
+ bias=True)
89
+ self.ending = nn.Conv2d(in_channels=width, out_channels=img_channel, kernel_size=3, padding=1, stride=1, groups=1,
90
+ bias=True)
91
+
92
+ self.encoders = nn.ModuleList()
93
+ self.decoders = nn.ModuleList()
94
+ self.middle_blks = nn.ModuleList()
95
+ self.ups = nn.ModuleList()
96
+ self.downs = nn.ModuleList()
97
+
98
+ chan = width
99
+ for num in enc_blk_nums:
100
+ self.encoders.append(
101
+ nn.Sequential(
102
+ *[BaselineBlock(chan, dw_expand, ffn_expand) for _ in range(num)]
103
+ )
104
+ )
105
+ self.downs.append(
106
+ nn.Conv2d(chan, 2*chan, 2, 2)
107
+ )
108
+ chan = chan * 2
109
+
110
+ self.middle_blks = \
111
+ nn.Sequential(
112
+ *[BaselineBlock(chan, dw_expand, ffn_expand) for _ in range(middle_blk_num)]
113
+ )
114
+
115
+ for num in dec_blk_nums:
116
+ self.ups.append(
117
+ nn.Sequential(
118
+ nn.Conv2d(chan, chan * 2, 1, bias=False),
119
+ nn.PixelShuffle(2)
120
+ )
121
+ )
122
+ chan = chan // 2
123
+ self.decoders.append(
124
+ nn.Sequential(
125
+ *[BaselineBlock(chan, dw_expand, ffn_expand) for _ in range(num)]
126
+ )
127
+ )
128
+
129
+ self.padder_size = 2 ** len(self.encoders)
130
+
131
+ def forward(self, inp):
132
+ B, C, H, W = inp.shape
133
+ inp = self.check_image_size(inp)
134
+
135
+ x = self.intro(inp)
136
+
137
+ encs = []
138
+
139
+ for encoder, down in zip(self.encoders, self.downs):
140
+ x = encoder(x)
141
+ encs.append(x)
142
+ x = down(x)
143
+
144
+ x = self.middle_blks(x)
145
+
146
+ for decoder, up, enc_skip in zip(self.decoders, self.ups, encs[::-1]):
147
+ x = up(x)
148
+ x = x + enc_skip
149
+ x = decoder(x)
150
+
151
+ x = self.ending(x)
152
+ x = x + inp
153
+
154
+ return x[:, :, :H, :W]
155
+
156
+ def check_image_size(self, x):
157
+ _, _, h, w = x.size()
158
+ mod_pad_h = (self.padder_size - h % self.padder_size) % self.padder_size
159
+ mod_pad_w = (self.padder_size - w % self.padder_size) % self.padder_size
160
+ x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h))
161
+ return x
162
+
163
+ class BaselineLocal(Local_Base, Baseline):
164
+ def __init__(self, *args, train_size=(1, 3, 256, 256), fast_imp=False, **kwargs):
165
+ Local_Base.__init__(self)
166
+ Baseline.__init__(self, *args, **kwargs)
167
+
168
+ N, C, H, W = train_size
169
+ base_size = (int(H * 1.5), int(W * 1.5))
170
+
171
+ self.eval()
172
+ with torch.no_grad():
173
+ self.convert(base_size=base_size, train_size=train_size, fast_imp=fast_imp)
174
+
175
+ if __name__ == '__main__':
176
+ img_channel = 3
177
+ width = 32
178
+
179
+ dw_expand = 1
180
+ ffn_expand = 2
181
+
182
+ # enc_blks = [2, 2, 4, 8]
183
+ # middle_blk_num = 12
184
+ # dec_blks = [2, 2, 2, 2]
185
+
186
+ enc_blks = [1, 1, 1, 28]
187
+ middle_blk_num = 1
188
+ dec_blks = [1, 1, 1, 1]
189
+
190
+ net = Baseline(img_channel=img_channel, width=width, middle_blk_num=middle_blk_num,
191
+ enc_blk_nums=enc_blks, dec_blk_nums=dec_blks, dw_expand=dw_expand, ffn_expand=ffn_expand)
192
+
193
+ inp_shape = (3, 256, 256)
194
+
195
+ from ptflops import get_model_complexity_info
196
+
197
+ macs, params = get_model_complexity_info(net, inp_shape, verbose=False, print_per_layer_stat=False)
198
+
199
+ params = float(params[:-3])
200
+ macs = float(macs[:-4])
201
+
202
+ print(macs, params)
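
Beyond the complexity check above, BaselineLocal is the inference-oriented variant: Local_Base.convert (see local_arch.py) swaps the global average pooling in the channel-attention branch for a test-time local variant, with train_size set to the training crop. A brief construction sketch:

from basicsr.models.archs.Baseline_arch import BaselineLocal

net = BaselineLocal(img_channel=3, width=32, middle_blk_num=1,
                    enc_blk_nums=[1, 1, 1, 28], dec_blk_nums=[1, 1, 1, 1],
                    train_size=(1, 3, 256, 256), fast_imp=False)
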
NAFNet/basicsr/models/archs/NAFNet_arch.py ADDED
@@ -0,0 +1,202 @@
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+
5
+ '''
6
+ Simple Baselines for Image Restoration
7
+
8
+ @article{chen2022simple,
9
+ title={Simple Baselines for Image Restoration},
10
+ author={Chen, Liangyu and Chu, Xiaojie and Zhang, Xiangyu and Sun, Jian},
11
+ journal={arXiv preprint arXiv:2204.04676},
12
+ year={2022}
13
+ }
14
+ '''
15
+
16
+ import torch
17
+ import torch.nn as nn
18
+ import torch.nn.functional as F
19
+ from basicsr.models.archs.arch_util import LayerNorm2d
20
+ from basicsr.models.archs.local_arch import Local_Base
21
+
22
+ class SimpleGate(nn.Module):
23
+ def forward(self, x):
24
+ x1, x2 = x.chunk(2, dim=1)
25
+ return x1 * x2
26
+
27
+ class NAFBlock(nn.Module):
28
+ def __init__(self, c, DW_Expand=2, FFN_Expand=2, drop_out_rate=0.):
29
+ super().__init__()
30
+ dw_channel = c * DW_Expand
31
+ self.conv1 = nn.Conv2d(in_channels=c, out_channels=dw_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
32
+ self.conv2 = nn.Conv2d(in_channels=dw_channel, out_channels=dw_channel, kernel_size=3, padding=1, stride=1, groups=dw_channel,
33
+ bias=True)
34
+ self.conv3 = nn.Conv2d(in_channels=dw_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
35
+
36
+ # Simplified Channel Attention
37
+ self.sca = nn.Sequential(
38
+ nn.AdaptiveAvgPool2d(1),
39
+ nn.Conv2d(in_channels=dw_channel // 2, out_channels=dw_channel // 2, kernel_size=1, padding=0, stride=1,
40
+ groups=1, bias=True),
41
+ )
42
+
43
+ # SimpleGate
44
+ self.sg = SimpleGate()
45
+
46
+ ffn_channel = FFN_Expand * c
47
+ self.conv4 = nn.Conv2d(in_channels=c, out_channels=ffn_channel, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
48
+ self.conv5 = nn.Conv2d(in_channels=ffn_channel // 2, out_channels=c, kernel_size=1, padding=0, stride=1, groups=1, bias=True)
49
+
50
+ self.norm1 = LayerNorm2d(c)
51
+ self.norm2 = LayerNorm2d(c)
52
+
53
+ self.dropout1 = nn.Dropout(drop_out_rate) if drop_out_rate > 0. else nn.Identity()
54
+ self.dropout2 = nn.Dropout(drop_out_rate) if drop_out_rate > 0. else nn.Identity()
55
+
56
+ self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
57
+ self.gamma = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
58
+
59
+ def forward(self, inp):
60
+ x = inp
61
+
62
+ x = self.norm1(x)
63
+
64
+ x = self.conv1(x)
65
+ x = self.conv2(x)
66
+ x = self.sg(x)
67
+ x = x * self.sca(x)
68
+ x = self.conv3(x)
69
+
70
+ x = self.dropout1(x)
71
+
72
+ y = inp + x * self.beta
73
+
74
+ x = self.conv4(self.norm2(y))
75
+ x = self.sg(x)
76
+ x = self.conv5(x)
77
+
78
+ x = self.dropout2(x)
79
+
80
+ return y + x * self.gamma
81
+
82
+
83
+ class NAFNet(nn.Module):
84
+
85
+ def __init__(self, img_channel=3, width=16, middle_blk_num=1, enc_blk_nums=[], dec_blk_nums=[]):
86
+ super().__init__()
87
+
88
+ self.intro = nn.Conv2d(in_channels=img_channel, out_channels=width, kernel_size=3, padding=1, stride=1, groups=1,
89
+ bias=True)
90
+ self.ending = nn.Conv2d(in_channels=width, out_channels=img_channel, kernel_size=3, padding=1, stride=1, groups=1,
91
+ bias=True)
92
+
93
+ self.encoders = nn.ModuleList()
94
+ self.decoders = nn.ModuleList()
95
+ self.middle_blks = nn.ModuleList()
96
+ self.ups = nn.ModuleList()
97
+ self.downs = nn.ModuleList()
98
+
99
+ chan = width
100
+ for num in enc_blk_nums:
101
+ self.encoders.append(
102
+ nn.Sequential(
103
+ *[NAFBlock(chan) for _ in range(num)]
104
+ )
105
+ )
106
+ self.downs.append(
107
+ nn.Conv2d(chan, 2*chan, 2, 2)
108
+ )
109
+ chan = chan * 2
110
+
111
+ self.middle_blks = \
112
+ nn.Sequential(
113
+ *[NAFBlock(chan) for _ in range(middle_blk_num)]
114
+ )
115
+
116
+ for num in dec_blk_nums:
117
+ self.ups.append(
118
+ nn.Sequential(
119
+ nn.Conv2d(chan, chan * 2, 1, bias=False),
120
+ nn.PixelShuffle(2)
121
+ )
122
+ )
123
+ chan = chan // 2
124
+ self.decoders.append(
125
+ nn.Sequential(
126
+ *[NAFBlock(chan) for _ in range(num)]
127
+ )
128
+ )
129
+
130
+ self.padder_size = 2 ** len(self.encoders)
131
+
132
+ def forward(self, inp):
133
+ B, C, H, W = inp.shape
134
+ inp = self.check_image_size(inp)
135
+
136
+ x = self.intro(inp)
137
+
138
+ encs = []
139
+
140
+ for encoder, down in zip(self.encoders, self.downs):
141
+ x = encoder(x)
142
+ encs.append(x)
143
+ x = down(x)
144
+
145
+ x = self.middle_blks(x)
146
+
147
+ for decoder, up, enc_skip in zip(self.decoders, self.ups, encs[::-1]):
148
+ x = up(x)
149
+ x = x + enc_skip
150
+ x = decoder(x)
151
+
152
+ x = self.ending(x)
153
+ x = x + inp
154
+
155
+ return x[:, :, :H, :W]
156
+
157
+ def check_image_size(self, x):
158
+ _, _, h, w = x.size()
159
+ mod_pad_h = (self.padder_size - h % self.padder_size) % self.padder_size
160
+ mod_pad_w = (self.padder_size - w % self.padder_size) % self.padder_size
161
+ x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h))
162
+ return x
163
+
164
+ class NAFNetLocal(Local_Base, NAFNet):
165
+ def __init__(self, *args, train_size=(1, 3, 256, 256), fast_imp=False, **kwargs):
166
+ Local_Base.__init__(self)
167
+ NAFNet.__init__(self, *args, **kwargs)
168
+
169
+ N, C, H, W = train_size
170
+ base_size = (int(H * 1.5), int(W * 1.5))
171
+
172
+ self.eval()
173
+ with torch.no_grad():
174
+ self.convert(base_size=base_size, train_size=train_size, fast_imp=fast_imp)
175
+
176
+
177
+ if __name__ == '__main__':
178
+ img_channel = 3
179
+ width = 32
180
+
181
+ # enc_blks = [2, 2, 4, 8]
182
+ # middle_blk_num = 12
183
+ # dec_blks = [2, 2, 2, 2]
184
+
185
+ enc_blks = [1, 1, 1, 28]
186
+ middle_blk_num = 1
187
+ dec_blks = [1, 1, 1, 1]
188
+
189
+ net = NAFNet(img_channel=img_channel, width=width, middle_blk_num=middle_blk_num,
190
+ enc_blk_nums=enc_blks, dec_blk_nums=dec_blks)
191
+
192
+
193
+ inp_shape = (3, 256, 256)
194
+
195
+ from ptflops import get_model_complexity_info
196
+
197
+ macs, params = get_model_complexity_info(net, inp_shape, verbose=False, print_per_layer_stat=False)
198
+
199
+ params = float(params[:-3])
200
+ macs = float(macs[:-4])
201
+
202
+ print(macs, params)
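
A forward-pass sketch complementing the complexity check above. NAFNet pads inputs up to a multiple of 2**len(enc_blk_nums) in check_image_size and crops the result back, so arbitrary spatial sizes work:

import torch
from basicsr.models.archs.NAFNet_arch import NAFNet

net = NAFNet(img_channel=3, width=32, middle_blk_num=1,
             enc_blk_nums=[1, 1, 1, 28], dec_blk_nums=[1, 1, 1, 1])
x = torch.randn(1, 3, 255, 301)  # deliberately not a multiple of 16
with torch.no_grad():
    y = net(x)
print(y.shape)  # torch.Size([1, 3, 255, 301])
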
NAFNet/basicsr/models/archs/NAFSSR_arch.py ADDED
@@ -0,0 +1,170 @@
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+
5
+ '''
6
+ NAFSSR: Stereo Image Super-Resolution Using NAFNet
7
+
8
+ @InProceedings{Chu2022NAFSSR,
9
+ author = {Xiaojie Chu and Liangyu Chen and Wenqing Yu},
10
+ title = {NAFSSR: Stereo Image Super-Resolution Using NAFNet},
11
+ booktitle = {CVPRW},
12
+ year = {2022},
13
+ }
14
+ '''
15
+
16
+ import numpy as np
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.nn.functional as F
20
+
21
+ from basicsr.models.archs.NAFNet_arch import LayerNorm2d, NAFBlock
22
+ from basicsr.models.archs.arch_util import MySequential
23
+ from basicsr.models.archs.local_arch import Local_Base
24
+
25
+ class SCAM(nn.Module):
26
+ '''
27
+ Stereo Cross Attention Module (SCAM)
28
+ '''
29
+ def __init__(self, c):
30
+ super().__init__()
31
+ self.scale = c ** -0.5
32
+
33
+ self.norm_l = LayerNorm2d(c)
34
+ self.norm_r = LayerNorm2d(c)
35
+ self.l_proj1 = nn.Conv2d(c, c, kernel_size=1, stride=1, padding=0)
36
+ self.r_proj1 = nn.Conv2d(c, c, kernel_size=1, stride=1, padding=0)
37
+
38
+ self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
39
+ self.gamma = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
40
+
41
+ self.l_proj2 = nn.Conv2d(c, c, kernel_size=1, stride=1, padding=0)
42
+ self.r_proj2 = nn.Conv2d(c, c, kernel_size=1, stride=1, padding=0)
43
+
44
+ def forward(self, x_l, x_r):
45
+ Q_l = self.l_proj1(self.norm_l(x_l)).permute(0, 2, 3, 1) # B, H, W, c
46
+ Q_r_T = self.r_proj1(self.norm_r(x_r)).permute(0, 2, 1, 3) # B, H, c, W (transposed)
47
+
48
+ V_l = self.l_proj2(x_l).permute(0, 2, 3, 1) # B, H, W, c
49
+ V_r = self.r_proj2(x_r).permute(0, 2, 3, 1) # B, H, W, c
50
+
51
+ # (B, H, W, c) x (B, H, c, W) -> (B, H, W, W)
52
+ attention = torch.matmul(Q_l, Q_r_T) * self.scale
53
+
54
+ F_r2l = torch.matmul(torch.softmax(attention, dim=-1), V_r) #B, H, W, c
55
+ F_l2r = torch.matmul(torch.softmax(attention.permute(0, 1, 3, 2), dim=-1), V_l) #B, H, W, c
56
+
57
+ # scale
58
+ F_r2l = F_r2l.permute(0, 3, 1, 2) * self.beta
59
+ F_l2r = F_l2r.permute(0, 3, 1, 2) * self.gamma
60
+ return x_l + F_r2l, x_r + F_l2r
61
+
62
+ class DropPath(nn.Module):
63
+ def __init__(self, drop_rate, module):
64
+ super().__init__()
65
+ self.drop_rate = drop_rate
66
+ self.module = module
67
+
68
+ def forward(self, *feats):
69
+ if self.training and np.random.rand() < self.drop_rate:
70
+ return feats
71
+
72
+ new_feats = self.module(*feats)
73
+ factor = 1. / (1 - self.drop_rate) if self.training else 1.
74
+
75
+ if self.training and factor != 1.:
76
+ new_feats = tuple([x+factor*(new_x-x) for x, new_x in zip(feats, new_feats)])
77
+ return new_feats
78
+
79
+ class NAFBlockSR(nn.Module):
80
+ '''
81
+ NAFBlock for Super-Resolution
82
+ '''
83
+ def __init__(self, c, fusion=False, drop_out_rate=0.):
84
+ super().__init__()
85
+ self.blk = NAFBlock(c, drop_out_rate=drop_out_rate)
86
+ self.fusion = SCAM(c) if fusion else None
87
+
88
+ def forward(self, *feats):
89
+ feats = tuple([self.blk(x) for x in feats])
90
+ if self.fusion:
91
+ feats = self.fusion(*feats)
92
+ return feats
93
+
94
+ class NAFNetSR(nn.Module):
95
+ '''
96
+ NAFNet for Super-Resolution
97
+ '''
98
+ def __init__(self, up_scale=4, width=48, num_blks=16, img_channel=3, drop_path_rate=0., drop_out_rate=0., fusion_from=-1, fusion_to=-1, dual=False):
99
+ super().__init__()
100
+ self.dual = dual # dual input for stereo SR (left view, right view)
101
+ self.intro = nn.Conv2d(in_channels=img_channel, out_channels=width, kernel_size=3, padding=1, stride=1, groups=1,
102
+ bias=True)
103
+ self.body = MySequential(
104
+ *[DropPath(
105
+ drop_path_rate,
106
+ NAFBlockSR(
107
+ width,
108
+ fusion=(fusion_from <= i and i <= fusion_to),
109
+ drop_out_rate=drop_out_rate
110
+ )) for i in range(num_blks)]
111
+ )
112
+
113
+ self.up = nn.Sequential(
114
+ nn.Conv2d(in_channels=width, out_channels=img_channel * up_scale**2, kernel_size=3, padding=1, stride=1, groups=1, bias=True),
115
+ nn.PixelShuffle(up_scale)
116
+ )
117
+ self.up_scale = up_scale
118
+
119
+ def forward(self, inp):
120
+ inp_hr = F.interpolate(inp, scale_factor=self.up_scale, mode='bilinear')
121
+ if self.dual:
122
+ inp = inp.chunk(2, dim=1)
123
+ else:
124
+ inp = (inp, )
125
+ feats = [self.intro(x) for x in inp]
126
+ feats = self.body(*feats)
127
+ out = torch.cat([self.up(x) for x in feats], dim=1)
128
+ out = out + inp_hr
129
+ return out
130
+
131
+ class NAFSSR(Local_Base, NAFNetSR):
132
+ def __init__(self, *args, train_size=(1, 6, 30, 90), fast_imp=False, fusion_from=-1, fusion_to=1000, **kwargs):
133
+ Local_Base.__init__(self)
134
+ NAFNetSR.__init__(self, *args, img_channel=3, fusion_from=fusion_from, fusion_to=fusion_to, dual=True, **kwargs)
135
+
136
+ N, C, H, W = train_size
137
+ base_size = (int(H * 1.5), int(W * 1.5))
138
+
139
+ self.eval()
140
+ with torch.no_grad():
141
+ self.convert(base_size=base_size, train_size=train_size, fast_imp=fast_imp)
142
+
143
+ if __name__ == '__main__':
144
+ num_blks = 128
145
+ width = 128
146
+ droppath=0.1
147
+ train_size = (1, 6, 30, 90)
148
+
149
+ net = NAFSSR(up_scale=2,train_size=train_size, fast_imp=True, width=width, num_blks=num_blks, drop_path_rate=droppath)
150
+
151
+ inp_shape = (6, 64, 64)
152
+
153
+ from ptflops import get_model_complexity_info
154
+ FLOPS = 0
155
+ macs, params = get_model_complexity_info(net, inp_shape, verbose=False, print_per_layer_stat=True)
156
+
157
+ # params = float(params[:-4])
158
+ print(params)
159
+ macs = float(macs[:-4]) + FLOPS / 10 ** 9
160
+
161
+ print('mac', macs, params)
162
+
163
+ # from basicsr.models.archs.arch_util import measure_inference_speed
164
+ # net = net.cuda()
165
+ # data = torch.randn((1, 6, 128, 128)).cuda()
166
+ # measure_inference_speed(net, (data,))
167
+
168
+
169
+
170
+
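
A forward-pass sketch: NAFSSR consumes a stereo pair concatenated along the channel axis (left 3 + right 3) and returns both super-resolved views concatenated the same way; Local_Base.convert (local_arch.py) runs once at construction:

import torch
from basicsr.models.archs.NAFSSR_arch import NAFSSR

net = NAFSSR(up_scale=2, width=48, num_blks=16)
lr_pair = torch.randn(1, 6, 30, 90)  # matches the default train_size
with torch.no_grad():
    sr_pair = net(lr_pair)
print(sr_pair.shape)  # torch.Size([1, 6, 60, 180])
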
NAFNet/basicsr/models/archs/__init__.py ADDED
@@ -0,0 +1,52 @@
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
5
+ # Copyright 2018-2020 BasicSR Authors
6
+ # ------------------------------------------------------------------------
7
+ import importlib
8
+ from os import path as osp
9
+
10
+ from basicsr.utils import scandir
11
+
12
+ # automatically scan and import arch modules
13
+ # scan all the files under the 'archs' folder and collect files ending with
14
+ # '_arch.py'
15
+ arch_folder = osp.dirname(osp.abspath(__file__))
16
+ arch_filenames = [
17
+ osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder)
18
+ if v.endswith('_arch.py')
19
+ ]
20
+ # import all the arch modules
21
+ _arch_modules = [
22
+ importlib.import_module(f'basicsr.models.archs.{file_name}')
23
+ for file_name in arch_filenames
24
+ ]
25
+
26
+
27
+ def dynamic_instantiation(modules, cls_type, opt):
28
+ """Dynamically instantiate class.
29
+
30
+ Args:
31
+ modules (list[importlib modules]): List of modules from importlib
32
+ files.
33
+ cls_type (str): Class type.
34
+ opt (dict): Class initialization kwargs.
35
+
36
+ Returns:
37
+ class: Instantiated class.
38
+ """
39
+
40
+ for module in modules:
41
+ cls_ = getattr(module, cls_type, None)
42
+ if cls_ is not None:
43
+ break
44
+ if cls_ is None:
45
+ raise ValueError(f'{cls_type} is not found.')
46
+ return cls_(**opt)
47
+
48
+
49
+ def define_network(opt):
50
+ network_type = opt.pop('type')
51
+ net = dynamic_instantiation(_arch_modules, network_type, opt)
52
+ return net
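
A sketch of how define_network is driven by a config: it pops 'type' and forwards the remaining keys to that architecture's constructor, mirroring a network_g block from the repo's YAML options:

from basicsr.models.archs import define_network

opt = {
    'type': 'NAFNet',
    'img_channel': 3,
    'width': 32,
    'middle_blk_num': 1,
    'enc_blk_nums': [1, 1, 1, 28],
    'dec_blk_nums': [1, 1, 1, 1],
}
net = define_network(opt)
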
NAFNet/basicsr/models/archs/arch_util.py ADDED
@@ -0,0 +1,350 @@
1
+ # ------------------------------------------------------------------------
2
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
3
+ # ------------------------------------------------------------------------
4
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
5
+ # Copyright 2018-2020 BasicSR Authors
6
+ # ------------------------------------------------------------------------
7
+ import math
8
+ import torch
9
+ from torch import nn as nn
10
+ from torch.nn import functional as F
11
+ from torch.nn import init as init
12
+ from torch.nn.modules.batchnorm import _BatchNorm
13
+
14
+ from basicsr.utils import get_root_logger
15
+
16
+ # try:
17
+ # from basicsr.models.ops.dcn import (ModulatedDeformConvPack,
18
+ # modulated_deform_conv)
19
+ # except ImportError:
20
+ # # print('Cannot import dcn. Ignore this warning if dcn is not used. '
21
+ # # 'Otherwise install BasicSR with compiling dcn.')
22
+ #
23
+
24
+ @torch.no_grad()
25
+ def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
26
+ """Initialize network weights.
27
+
28
+ Args:
29
+ module_list (list[nn.Module] | nn.Module): Modules to be initialized.
30
+ scale (float): Scale initialized weights, especially for residual
31
+ blocks. Default: 1.
32
+ bias_fill (float): The value to fill bias. Default: 0
33
+ kwargs (dict): Other arguments for initialization function.
34
+ """
35
+ if not isinstance(module_list, list):
36
+ module_list = [module_list]
37
+ for module in module_list:
38
+ for m in module.modules():
39
+ if isinstance(m, nn.Conv2d):
40
+ init.kaiming_normal_(m.weight, **kwargs)
41
+ m.weight.data *= scale
42
+ if m.bias is not None:
43
+ m.bias.data.fill_(bias_fill)
44
+ elif isinstance(m, nn.Linear):
45
+ init.kaiming_normal_(m.weight, **kwargs)
46
+ m.weight.data *= scale
47
+ if m.bias is not None:
48
+ m.bias.data.fill_(bias_fill)
49
+ elif isinstance(m, _BatchNorm):
50
+ init.constant_(m.weight, 1)
51
+ if m.bias is not None:
52
+ m.bias.data.fill_(bias_fill)
53
+
54
+
55
+ def make_layer(basic_block, num_basic_block, **kwarg):
56
+ """Make layers by stacking the same blocks.
57
+
58
+ Args:
59
+ basic_block (nn.module): nn.module class for basic block.
60
+ num_basic_block (int): number of blocks.
61
+
62
+ Returns:
63
+ nn.Sequential: Stacked blocks in nn.Sequential.
64
+ """
65
+ layers = []
66
+ for _ in range(num_basic_block):
67
+ layers.append(basic_block(**kwarg))
68
+ return nn.Sequential(*layers)
69
+
70
+
71
+ class ResidualBlockNoBN(nn.Module):
72
+ """Residual block without BN.
73
+
74
+ It has a style of:
75
+ ---Conv-ReLU-Conv-+-
76
+ |________________|
77
+
78
+ Args:
79
+ num_feat (int): Channel number of intermediate features.
80
+ Default: 64.
81
+ res_scale (float): Residual scale. Default: 1.
82
+ pytorch_init (bool): If set to True, use pytorch default init,
83
+ otherwise, use default_init_weights. Default: False.
84
+ """
85
+
86
+ def __init__(self, num_feat=64, res_scale=1, pytorch_init=False):
87
+ super(ResidualBlockNoBN, self).__init__()
88
+ self.res_scale = res_scale
89
+ self.conv1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
90
+ self.conv2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=True)
91
+ self.relu = nn.ReLU(inplace=True)
92
+
93
+ if not pytorch_init:
94
+ default_init_weights([self.conv1, self.conv2], 0.1)
95
+
96
+ def forward(self, x):
97
+ identity = x
98
+ out = self.conv2(self.relu(self.conv1(x)))
99
+ return identity + out * self.res_scale
100
+
101
+
102
+ class Upsample(nn.Sequential):
103
+ """Upsample module.
104
+
105
+ Args:
106
+ scale (int): Scale factor. Supported scales: 2^n and 3.
107
+ num_feat (int): Channel number of intermediate features.
108
+ """
109
+
110
+ def __init__(self, scale, num_feat):
111
+ m = []
112
+ if (scale & (scale - 1)) == 0: # scale = 2^n
113
+ for _ in range(int(math.log(scale, 2))):
114
+ m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
115
+ m.append(nn.PixelShuffle(2))
116
+ elif scale == 3:
117
+ m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
118
+ m.append(nn.PixelShuffle(3))
119
+ else:
120
+ raise ValueError(f'scale {scale} is not supported. '
121
+ 'Supported scales: 2^n and 3.')
122
+ super(Upsample, self).__init__(*m)
123
+
124
+
125
+ def flow_warp(x,
126
+ flow,
127
+ interp_mode='bilinear',
128
+ padding_mode='zeros',
129
+ align_corners=True):
130
+ """Warp an image or feature map with optical flow.
131
+
132
+ Args:
133
+ x (Tensor): Tensor with size (n, c, h, w).
134
+ flow (Tensor): Tensor with size (n, h, w, 2), normal value.
135
+ interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'.
136
+ padding_mode (str): 'zeros' or 'border' or 'reflection'.
137
+ Default: 'zeros'.
138
+ align_corners (bool): Before pytorch 1.3, the default value is
139
+ align_corners=True. After pytorch 1.3, the default value is
140
+ align_corners=False. Here, we use True as the default.
141
+
142
+ Returns:
143
+ Tensor: Warped image or feature map.
144
+ """
145
+ assert x.size()[-2:] == flow.size()[1:3]
146
+ _, _, h, w = x.size()
147
+ # create mesh grid
148
+ grid_y, grid_x = torch.meshgrid(
149
+ torch.arange(0, h).type_as(x),
150
+ torch.arange(0, w).type_as(x))
151
+ grid = torch.stack((grid_x, grid_y), 2).float() # W(x), H(y), 2
152
+ grid.requires_grad = False
153
+
154
+ vgrid = grid + flow
155
+ # scale grid to [-1,1]
156
+ vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(w - 1, 1) - 1.0
157
+ vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(h - 1, 1) - 1.0
158
+ vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
159
+ output = F.grid_sample(
160
+ x,
161
+ vgrid_scaled,
162
+ mode=interp_mode,
163
+ padding_mode=padding_mode,
164
+ align_corners=align_corners)
165
+
166
+ # TODO, what if align_corners=False
167
+ return output
168
+
169
+
170
+ def resize_flow(flow,
171
+ size_type,
172
+ sizes,
173
+ interp_mode='bilinear',
174
+ align_corners=False):
175
+ """Resize a flow according to ratio or shape.
176
+
177
+ Args:
178
+ flow (Tensor): Precomputed flow. shape [N, 2, H, W].
179
+ size_type (str): 'ratio' or 'shape'.
180
+ sizes (list[int | float]): the ratio for resizing or the final output
181
+ shape.
182
+ 1) The order of ratio should be [ratio_h, ratio_w]. For
183
+ downsampling, the ratio should be smaller than 1.0 (i.e., ratio
184
+ < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e.,
185
+ ratio > 1.0).
186
+ 2) The order of output_size should be [out_h, out_w].
187
+ interp_mode (str): The mode of interpolation for resizing.
188
+ Default: 'bilinear'.
189
+ align_corners (bool): Whether align corners. Default: False.
190
+
191
+ Returns:
192
+ Tensor: Resized flow.
193
+ """
194
+ _, _, flow_h, flow_w = flow.size()
195
+ if size_type == 'ratio':
196
+ output_h, output_w = int(flow_h * sizes[0]), int(flow_w * sizes[1])
197
+ elif size_type == 'shape':
198
+ output_h, output_w = sizes[0], sizes[1]
199
+ else:
200
+ raise ValueError(
201
+ f'Size type should be ratio or shape, but got type {size_type}.')
202
+
203
+ input_flow = flow.clone()
204
+ ratio_h = output_h / flow_h
205
+ ratio_w = output_w / flow_w
206
+ input_flow[:, 0, :, :] *= ratio_w
207
+ input_flow[:, 1, :, :] *= ratio_h
208
+ resized_flow = F.interpolate(
209
+ input=input_flow,
210
+ size=(output_h, output_w),
211
+ mode=interp_mode,
212
+ align_corners=align_corners)
213
+ return resized_flow
214
+
215
+
+ # TODO: may write a cpp file
+ def pixel_unshuffle(x, scale):
+     """Pixel unshuffle.
+ 
+     Args:
+         x (Tensor): Input feature with shape (b, c, hh, hw).
+         scale (int): Downsample ratio.
+ 
+     Returns:
+         Tensor: the pixel unshuffled feature.
+     """
+     b, c, hh, hw = x.size()
+     out_channel = c * (scale**2)
+     assert hh % scale == 0 and hw % scale == 0
+     h = hh // scale
+     w = hw // scale
+     x_view = x.view(b, c, h, scale, w, scale)
+     return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
+ 
+ 
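Editor's note (not part of the uploaded file) — the channel layout produced here matches torch's built-in pixel unshuffle, so on PyTorch >= 1.8, where F.pixel_unshuffle exists, the two should agree exactly:

    x = torch.randn(2, 3, 8, 8)
    assert torch.equal(pixel_unshuffle(x, 2), F.pixel_unshuffle(x, 2))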
+ # class DCNv2Pack(ModulatedDeformConvPack):
+ #     """Modulated deformable conv for deformable alignment.
+ #
+ #     Different from the official DCNv2Pack, which generates offsets and masks
+ #     from the preceding features, this DCNv2Pack takes another different
+ #     features to generate offsets and masks.
+ #
+ #     Ref:
+ #         Delving Deep into Deformable Alignment in Video Super-Resolution.
+ #     """
+ #
+ #     def forward(self, x, feat):
+ #         out = self.conv_offset(feat)
+ #         o1, o2, mask = torch.chunk(out, 3, dim=1)
+ #         offset = torch.cat((o1, o2), dim=1)
+ #         mask = torch.sigmoid(mask)
+ #
+ #         offset_absmean = torch.mean(torch.abs(offset))
+ #         if offset_absmean > 50:
+ #             logger = get_root_logger()
+ #             logger.warning(
+ #                 f'Offset abs mean is {offset_absmean}, larger than 50.')
+ #
+ #         return modulated_deform_conv(x, offset, mask, self.weight, self.bias,
+ #                                      self.stride, self.padding, self.dilation,
+ #                                      self.groups, self.deformable_groups)
+ 
+ 
+ class LayerNormFunction(torch.autograd.Function):
+ 
+     @staticmethod
+     def forward(ctx, x, weight, bias, eps):
+         ctx.eps = eps
+         N, C, H, W = x.size()
+         mu = x.mean(1, keepdim=True)
+         var = (x - mu).pow(2).mean(1, keepdim=True)
+         y = (x - mu) / (var + eps).sqrt()
+         ctx.save_for_backward(y, var, weight)
+         y = weight.view(1, C, 1, 1) * y + bias.view(1, C, 1, 1)
+         return y
+ 
+     @staticmethod
+     def backward(ctx, grad_output):
+         eps = ctx.eps
+ 
+         N, C, H, W = grad_output.size()
+         # saved_variables is a deprecated alias; saved_tensors is the
+         # supported accessor
+         y, var, weight = ctx.saved_tensors
+         g = grad_output * weight.view(1, C, 1, 1)
+         mean_g = g.mean(dim=1, keepdim=True)
+ 
+         mean_gy = (g * y).mean(dim=1, keepdim=True)
+         gx = 1. / torch.sqrt(var + eps) * (g - y * mean_gy - mean_g)
+         return gx, (grad_output * y).sum(dim=3).sum(dim=2).sum(
+             dim=0), grad_output.sum(dim=3).sum(dim=2).sum(dim=0), None
+ 
+ 
+ class LayerNorm2d(nn.Module):
+ 
+     def __init__(self, channels, eps=1e-6):
+         super(LayerNorm2d, self).__init__()
+         self.register_parameter('weight', nn.Parameter(torch.ones(channels)))
+         self.register_parameter('bias', nn.Parameter(torch.zeros(channels)))
+         self.eps = eps
+ 
+     def forward(self, x):
+         return LayerNormFunction.apply(x, self.weight, self.bias, self.eps)
+ 
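Editor's note (not part of the uploaded file) — LayerNorm2d normalizes over the channel dimension at each spatial position, so it should agree with torch's built-in layer norm applied channels-last; a sketch with matched eps:

    x = torch.randn(2, 4, 5, 5)
    ln = LayerNorm2d(4, eps=1e-6)
    ref = F.layer_norm(x.permute(0, 2, 3, 1), (4,), ln.weight, ln.bias, 1e-6)
    assert torch.allclose(ln(x), ref.permute(0, 3, 1, 2), atol=1e-5)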
+ # handle modules whose forward takes / returns multiple inputs
+ class MySequential(nn.Sequential):
+     def forward(self, *inputs):
+         for module in self._modules.values():
+             if isinstance(inputs, tuple):
+                 inputs = module(*inputs)
+             else:
+                 inputs = module(inputs)
+         return inputs
+ 
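Editor's note (not part of the uploaded file) — plain nn.Sequential cannot thread a tuple through its modules, which MySequential fixes; a sketch with a hypothetical two-input module:

    class AddOne(nn.Module):
        def forward(self, a, b):
            return a + 1, b + 1

    seq = MySequential(AddOne(), AddOne())
    a, b = seq(torch.zeros(1), torch.zeros(1))  # both end up at 2.0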
+ 
+ import time
+ def measure_inference_speed(model, data, max_iter=200, log_interval=50):
+     model.eval()
+ 
+     # the first several iterations may be very slow, so skip them
+     num_warmup = 5
+     pure_inf_time = 0
+     fps = 0
+ 
+     # benchmark over max_iter images and take the average
+     for i in range(max_iter):
+ 
+         torch.cuda.synchronize()
+         start_time = time.perf_counter()
+ 
+         with torch.no_grad():
+             model(*data)
+ 
+         torch.cuda.synchronize()
+         elapsed = time.perf_counter() - start_time
+ 
+         if i >= num_warmup:
+             pure_inf_time += elapsed
+             if (i + 1) % log_interval == 0:
+                 fps = (i + 1 - num_warmup) / pure_inf_time
+                 print(
+                     f'Done image [{i + 1:<3}/ {max_iter}], '
+                     f'fps: {fps:.1f} img / s, '
+                     f'time per image: {1000 / fps:.1f} ms / img',
+                     flush=True)
+ 
+         if (i + 1) == max_iter:
+             fps = (i + 1 - num_warmup) / pure_inf_time
+             print(
+                 f'Overall fps: {fps:.1f} img / s, '
+                 f'time per image: {1000 / fps:.1f} ms / img',
+                 flush=True)
+             break
+     return fps
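Editor's note (not part of the uploaded file) — measure_inference_speed expects the model and inputs to already sit on the GPU and takes the inputs as a tuple of positional arguments; a usage sketch assuming a CUDA device:

    net = nn.Conv2d(3, 3, 3, padding=1).cuda().eval()
    x = torch.randn(1, 3, 256, 256, device='cuda')
    fps = measure_inference_speed(net, (x,), max_iter=60, log_interval=20)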
NAFNet/basicsr/models/archs/local_arch.py ADDED
@@ -0,0 +1,104 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ 
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ 
+ class AvgPool2d(nn.Module):
+     def __init__(self, kernel_size=None, base_size=None, auto_pad=True, fast_imp=False, train_size=None):
+         super().__init__()
+         self.kernel_size = kernel_size
+         self.base_size = base_size
+         self.auto_pad = auto_pad
+ 
+         # only used for fast implementation
+         self.fast_imp = fast_imp
+         self.rs = [5, 4, 3, 2, 1]
+         self.max_r1 = self.rs[0]
+         self.max_r2 = self.rs[0]
+         self.train_size = train_size
+ 
+     def extra_repr(self) -> str:
+         return 'kernel_size={}, base_size={}, stride={}, fast_imp={}'.format(
+             self.kernel_size, self.base_size, self.kernel_size, self.fast_imp
+         )
+ 
+     def forward(self, x):
+         if self.kernel_size is None and self.base_size:
+             train_size = self.train_size
+             if isinstance(self.base_size, int):
+                 self.base_size = (self.base_size, self.base_size)
+             self.kernel_size = list(self.base_size)
+             self.kernel_size[0] = x.shape[2] * self.base_size[0] // train_size[-2]
+             self.kernel_size[1] = x.shape[3] * self.base_size[1] // train_size[-1]
+ 
+             # only used for fast implementation
+             self.max_r1 = max(1, self.rs[0] * x.shape[2] // train_size[-2])
+             self.max_r2 = max(1, self.rs[0] * x.shape[3] // train_size[-1])
+ 
+         if self.kernel_size[0] >= x.size(-2) and self.kernel_size[1] >= x.size(-1):
+             return F.adaptive_avg_pool2d(x, 1)
+ 
+         if self.fast_imp:  # non-equivalent implementation but faster
+             h, w = x.shape[2:]
+             if self.kernel_size[0] >= h and self.kernel_size[1] >= w:
+                 out = F.adaptive_avg_pool2d(x, 1)
+             else:
+                 r1 = [r for r in self.rs if h % r == 0][0]
+                 r2 = [r for r in self.rs if w % r == 0][0]
+                 # reduction constraint
+                 r1 = min(self.max_r1, r1)
+                 r2 = min(self.max_r2, r2)
+                 s = x[:, :, ::r1, ::r2].cumsum(dim=-1).cumsum(dim=-2)
+                 n, c, h, w = s.shape
+                 k1, k2 = min(h - 1, self.kernel_size[0] // r1), min(w - 1, self.kernel_size[1] // r2)
+                 out = (s[:, :, :-k1, :-k2] - s[:, :, :-k1, k2:] - s[:, :, k1:, :-k2] + s[:, :, k1:, k2:]) / (k1 * k2)
+                 out = torch.nn.functional.interpolate(out, scale_factor=(r1, r2))
+         else:
+             n, c, h, w = x.shape
+             s = x.cumsum(dim=-1).cumsum_(dim=-2)
+             s = torch.nn.functional.pad(s, (1, 0, 1, 0))  # pad 0 for convenience
+             k1, k2 = min(h, self.kernel_size[0]), min(w, self.kernel_size[1])
+             s1, s2, s3, s4 = s[:, :, :-k1, :-k2], s[:, :, :-k1, k2:], s[:, :, k1:, :-k2], s[:, :, k1:, k2:]
+             out = s4 + s1 - s2 - s3
+             out = out / (k1 * k2)
+ 
+         if self.auto_pad:
+             n, c, h, w = x.shape
+             _h, _w = out.shape[2:]
+             # print(x.shape, self.kernel_size)
+             pad2d = ((w - _w) // 2, (w - _w + 1) // 2, (h - _h) // 2, (h - _h + 1) // 2)
+             out = torch.nn.functional.pad(out, pad2d, mode='replicate')
+ 
+         return out
+ 
+ def replace_layers(model, base_size, train_size, fast_imp, **kwargs):
+     for n, m in model.named_children():
+         if len(list(m.children())) > 0:
+             ## compound module, go inside it
+             replace_layers(m, base_size, train_size, fast_imp, **kwargs)
+ 
+         if isinstance(m, nn.AdaptiveAvgPool2d):
+             pool = AvgPool2d(base_size=base_size, fast_imp=fast_imp, train_size=train_size)
+             assert m.output_size == 1
+             setattr(model, n, pool)
+ 
+ 
+ '''
+ ref.
+ @article{chu2021tlsc,
+   title={Revisiting Global Statistics Aggregation for Improving Image Restoration},
+   author={Chu, Xiaojie and Chen, Liangyu and Chen, Chengpeng and Lu, Xin},
+   journal={arXiv preprint arXiv:2112.04491},
+   year={2021}
+ }
+ '''
+ class Local_Base():
+     def convert(self, *args, train_size, **kwargs):
+         replace_layers(self, *args, train_size=train_size, **kwargs)
+         imgs = torch.rand(train_size)
+         with torch.no_grad():
+             self.forward(imgs)
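Editor's note (not part of the uploaded file) — Local_Base implements test-time local statistics aggregation (TLSC) from the reference above: convert() swaps every global AdaptiveAvgPool2d for the windowed AvgPool2d, then runs a dry forward pass so the kernel sizes get initialized. A sketch mirroring how the repo's NAFNetLocal wrapper in NAFNet_arch.py is built:

    class NAFNetLocal(Local_Base, NAFNet):  # NAFNet from NAFNet_arch.py
        def __init__(self, *args, train_size=(1, 3, 256, 256), fast_imp=False, **kwargs):
            Local_Base.__init__(self)
            NAFNet.__init__(self, *args, **kwargs)
            N, C, H, W = train_size
            base_size = (int(H * 1.5), int(W * 1.5))
            self.eval()
            with torch.no_grad():
                self.convert(base_size=base_size, train_size=train_size, fast_imp=fast_imp)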
NAFNet/basicsr/models/base_model.py ADDED
@@ -0,0 +1,356 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+ import logging
+ import os
+ import torch
+ from collections import OrderedDict
+ from copy import deepcopy
+ from torch.nn.parallel import DataParallel, DistributedDataParallel
+ 
+ from basicsr.models import lr_scheduler as lr_scheduler
+ from basicsr.utils.dist_util import master_only
+ 
+ logger = logging.getLogger('basicsr')
+ 
+ 
+ class BaseModel():
+     """Base model."""
+ 
+     def __init__(self, opt):
+         self.opt = opt
+         self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
+         self.is_train = opt['is_train']
+         self.schedulers = []
+         self.optimizers = []
+ 
+     def feed_data(self, data):
+         pass
+ 
+     def optimize_parameters(self):
+         pass
+ 
+     def get_current_visuals(self):
+         pass
+ 
+     def save(self, epoch, current_iter):
+         """Save networks and training state."""
+         pass
+ 
+     def validation(self, dataloader, current_iter, tb_logger, save_img=False, rgb2bgr=True, use_image=True):
+         """Validation function.
+ 
+         Args:
+             dataloader (torch.utils.data.DataLoader): Validation dataloader.
+             current_iter (int): Current iteration.
+             tb_logger (tensorboard logger): Tensorboard logger.
+             save_img (bool): Whether to save images. Default: False.
+             rgb2bgr (bool): Whether to save images using rgb2bgr. Default: True.
+             use_image (bool): Whether to use saved images to compute metrics
+                 (PSNR, SSIM); if not, use data directly from the network's
+                 output. Default: True.
+         """
+         if self.opt['dist']:
+             return self.dist_validation(dataloader, current_iter, tb_logger, save_img, rgb2bgr, use_image)
+         else:
+             return self.nondist_validation(dataloader, current_iter, tb_logger,
+                                            save_img, rgb2bgr, use_image)
+ 
+     def get_current_log(self):
+         return self.log_dict
+ 
+     def model_to_device(self, net):
+         """Move the model to device. It also wraps models with
+         DistributedDataParallel or DataParallel.
+ 
+         Args:
+             net (nn.Module)
+         """
+ 
+         net = net.to(self.device)
+         if self.opt['dist']:
+             find_unused_parameters = self.opt.get('find_unused_parameters',
+                                                   False)
+             net = DistributedDataParallel(
+                 net,
+                 device_ids=[torch.cuda.current_device()],
+                 find_unused_parameters=find_unused_parameters)
+         elif self.opt['num_gpu'] > 1:
+             net = DataParallel(net)
+         return net
+ 
+     def setup_schedulers(self):
+         """Set up schedulers."""
+         train_opt = self.opt['train']
+         scheduler_type = train_opt['scheduler'].pop('type')
+         if scheduler_type in ['MultiStepLR', 'MultiStepRestartLR']:
+             for optimizer in self.optimizers:
+                 self.schedulers.append(
+                     lr_scheduler.MultiStepRestartLR(optimizer,
+                                                     **train_opt['scheduler']))
+         elif scheduler_type == 'CosineAnnealingRestartLR':
+             for optimizer in self.optimizers:
+                 self.schedulers.append(
+                     lr_scheduler.CosineAnnealingRestartLR(
+                         optimizer, **train_opt['scheduler']))
+         elif scheduler_type == 'TrueCosineAnnealingLR':
+             print('..', 'cosineannealingLR')
+             for optimizer in self.optimizers:
+                 self.schedulers.append(
+                     torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, **train_opt['scheduler']))
+         elif scheduler_type == 'LinearLR':
+             for optimizer in self.optimizers:
+                 self.schedulers.append(
+                     lr_scheduler.LinearLR(
+                         optimizer, train_opt['total_iter']))
+         elif scheduler_type == 'VibrateLR':
+             for optimizer in self.optimizers:
+                 self.schedulers.append(
+                     lr_scheduler.VibrateLR(
+                         optimizer, train_opt['total_iter']))
+         else:
+             raise NotImplementedError(
+                 f'Scheduler {scheduler_type} is not implemented yet.')
+ 
+     def get_bare_model(self, net):
+         """Get the bare model, especially when it is wrapped with
+         DistributedDataParallel or DataParallel.
+         """
+         if isinstance(net, (DataParallel, DistributedDataParallel)):
+             net = net.module
+         return net
+ 
+     @master_only
+     def print_network(self, net):
+         """Print the str and parameter number of a network.
+ 
+         Args:
+             net (nn.Module)
+         """
+         if isinstance(net, (DataParallel, DistributedDataParallel)):
+             net_cls_str = (f'{net.__class__.__name__} - '
+                            f'{net.module.__class__.__name__}')
+         else:
+             net_cls_str = f'{net.__class__.__name__}'
+ 
+         net = self.get_bare_model(net)
+         net_str = str(net)
+         net_params = sum(map(lambda x: x.numel(), net.parameters()))
+ 
+         logger.info(
+             f'Network: {net_cls_str}, with parameters: {net_params:,d}')
+         logger.info(net_str)
+ 
+     def _set_lr(self, lr_groups_l):
+         """Set learning rate for warm-up.
+ 
+         Args:
+             lr_groups_l (list): List of lr_groups, one for each optimizer.
+         """
+         for optimizer, lr_groups in zip(self.optimizers, lr_groups_l):
+             for param_group, lr in zip(optimizer.param_groups, lr_groups):
+                 param_group['lr'] = lr
+ 
+     def _get_init_lr(self):
+         """Get the initial lr, which is set by the scheduler."""
+         init_lr_groups_l = []
+         for optimizer in self.optimizers:
+             init_lr_groups_l.append(
+                 [v['initial_lr'] for v in optimizer.param_groups])
+         return init_lr_groups_l
+ 
+     def update_learning_rate(self, current_iter, warmup_iter=-1):
+         """Update learning rate.
+ 
+         Args:
+             current_iter (int): Current iteration.
+             warmup_iter (int): Warmup iter numbers. -1 for no warmup.
+                 Default: -1.
+         """
+         if current_iter > 1:
+             for scheduler in self.schedulers:
+                 scheduler.step()
+         # set up warm-up learning rate
+         if current_iter < warmup_iter:
+             # get initial lr for each group
+             init_lr_g_l = self._get_init_lr()
+             # modify warming-up learning rates
+             # currently only linear warm up is supported
+             warm_up_lr_l = []
+             for init_lr_g in init_lr_g_l:
+                 warm_up_lr_l.append(
+                     [v / warmup_iter * current_iter for v in init_lr_g])
+             # set learning rate
+             self._set_lr(warm_up_lr_l)
+ 
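Editor's note (not part of the uploaded file) — the warm-up branch above scales the initial learning rates linearly. In plain numbers:

    init_lr, warmup_iter, current_iter = 1e-3, 4000, 1000
    lr = init_lr / warmup_iter * current_iter   # 2.5e-4, i.e. 25% of the way up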
+     def get_current_learning_rate(self):
+         return [
+             param_group['lr']
+             for param_group in self.optimizers[0].param_groups
+         ]
+ 
+     @master_only
+     def save_network(self, net, net_label, current_iter, param_key='params'):
+         """Save networks.
+ 
+         Args:
+             net (nn.Module | list[nn.Module]): Network(s) to be saved.
+             net_label (str): Network label.
+             current_iter (int): Current iter number.
+             param_key (str | list[str]): The parameter key(s) to save network.
+                 Default: 'params'.
+         """
+         if current_iter == -1:
+             current_iter = 'latest'
+         save_filename = f'{net_label}_{current_iter}.pth'
+         save_path = os.path.join(self.opt['path']['models'], save_filename)
+ 
+         net = net if isinstance(net, list) else [net]
+         param_key = param_key if isinstance(param_key, list) else [param_key]
+         assert len(net) == len(
+             param_key), 'The lengths of net and param_key should be the same.'
+ 
+         save_dict = {}
+         for net_, param_key_ in zip(net, param_key):
+             net_ = self.get_bare_model(net_)
+             state_dict = net_.state_dict()
+             for key, param in state_dict.items():
+                 if key.startswith('module.'):  # remove unnecessary 'module.'
+                     key = key[7:]
+                 state_dict[key] = param.cpu()
+             save_dict[param_key_] = state_dict
+ 
+         torch.save(save_dict, save_path)
+ 
+     def _print_different_keys_loading(self, crt_net, load_net, strict=True):
+         """Print keys with different names or sizes when loading models.
+ 
+         1. Print keys with different names.
+         2. If strict=False, print the same key but with a different tensor
+            size. It also ignores keys with different sizes (not loaded).
+ 
+         Args:
+             crt_net (torch model): Current network.
+             load_net (dict): Loaded network.
+             strict (bool): Whether strictly loaded. Default: True.
+         """
+         crt_net = self.get_bare_model(crt_net)
+         crt_net = crt_net.state_dict()
+         crt_net_keys = set(crt_net.keys())
+         load_net_keys = set(load_net.keys())
+ 
+         if crt_net_keys != load_net_keys:
+             logger.warning('Current net - loaded net:')
+             for v in sorted(list(crt_net_keys - load_net_keys)):
+                 logger.warning(f'  {v}')
+             logger.warning('Loaded net - current net:')
+             for v in sorted(list(load_net_keys - crt_net_keys)):
+                 logger.warning(f'  {v}')
+ 
+         # check the size for the same keys
+         if not strict:
+             common_keys = crt_net_keys & load_net_keys
+             for k in common_keys:
+                 if crt_net[k].size() != load_net[k].size():
+                     logger.warning(
+                         f'Size different, ignore [{k}]: crt_net: '
+                         f'{crt_net[k].shape}; load_net: {load_net[k].shape}')
+                     load_net[k + '.ignore'] = load_net.pop(k)
+ 
+     def load_network(self, net, load_path, strict=True, param_key='params'):
+         """Load network.
+ 
+         Args:
+             load_path (str): The path of the network to be loaded.
+             net (nn.Module): Network.
+             strict (bool): Whether strictly loaded.
+             param_key (str): The parameter key of the loaded network. If set
+                 to None, use the root of the loaded dict. Default: 'params'.
+         """
+         net = self.get_bare_model(net)
+         logger.info(
+             f'Loading {net.__class__.__name__} model from {load_path}.')
+         load_net = torch.load(
+             load_path, map_location=lambda storage, loc: storage)
+         if param_key is not None:
+             load_net = load_net[param_key]
+         print(' load net keys', load_net.keys())
+         # remove unnecessary 'module.'
+         for k, v in deepcopy(load_net).items():
+             if k.startswith('module.'):
+                 load_net[k[7:]] = v
+                 load_net.pop(k)
+         self._print_different_keys_loading(net, load_net, strict)
+         net.load_state_dict(load_net, strict=strict)
+ 
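Editor's note (not part of the uploaded file) — a sketch of the checkpoint layout these save/load helpers agree on: save_network writes a dict keyed by param_key, which load_network indexes before loading (the path below is illustrative):

    ckpt = torch.load('experiments/my_exp/models/net_g_latest.pth',
                      map_location='cpu')
    state_dict = ckpt['params']   # default param_key
    # keys are bare parameter names; any 'module.' prefix is already stripped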
+     @master_only
+     def save_training_state(self, epoch, current_iter):
+         """Save training states during training, which will be used for
+         resuming.
+ 
+         Args:
+             epoch (int): Current epoch.
+             current_iter (int): Current iteration.
+         """
+         if current_iter != -1:
+             state = {
+                 'epoch': epoch,
+                 'iter': current_iter,
+                 'optimizers': [],
+                 'schedulers': []
+             }
+             for o in self.optimizers:
+                 state['optimizers'].append(o.state_dict())
+             for s in self.schedulers:
+                 state['schedulers'].append(s.state_dict())
+             save_filename = f'{current_iter}.state'
+             save_path = os.path.join(self.opt['path']['training_states'],
+                                      save_filename)
+             torch.save(state, save_path)
+ 
+     def resume_training(self, resume_state):
+         """Reload the optimizers and schedulers for resumed training.
+ 
+         Args:
+             resume_state (dict): Resume state.
+         """
+         resume_optimizers = resume_state['optimizers']
+         resume_schedulers = resume_state['schedulers']
+         assert len(resume_optimizers) == len(
+             self.optimizers), 'Wrong lengths of optimizers'
+         assert len(resume_schedulers) == len(
+             self.schedulers), 'Wrong lengths of schedulers'
+         for i, o in enumerate(resume_optimizers):
+             self.optimizers[i].load_state_dict(o)
+         for i, s in enumerate(resume_schedulers):
+             self.schedulers[i].load_state_dict(s)
+ 
+     def reduce_loss_dict(self, loss_dict):
+         """Reduce loss dict.
+ 
+         In distributed training, it averages the losses among different GPUs.
+ 
+         Args:
+             loss_dict (OrderedDict): Loss dict.
+         """
+         with torch.no_grad():
+             if self.opt['dist']:
+                 keys = []
+                 losses = []
+                 for name, value in loss_dict.items():
+                     keys.append(name)
+                     losses.append(value)
+                 losses = torch.stack(losses, 0)
+                 torch.distributed.reduce(losses, dst=0)
+                 if self.opt['rank'] == 0:
+                     losses /= self.opt['world_size']
+                 loss_dict = {key: loss for key, loss in zip(keys, losses)}
+ 
+             log_dict = OrderedDict()
+             for name, value in loss_dict.items():
+                 log_dict[name] = value.mean().item()
+ 
+             return log_dict
NAFNet/basicsr/models/image_restoration_model.py ADDED
@@ -0,0 +1,413 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+ import importlib
+ import torch
+ import torch.nn.functional as F
+ from collections import OrderedDict
+ from copy import deepcopy
+ from os import path as osp
+ from tqdm import tqdm
+ 
+ from basicsr.models.archs import define_network
+ from basicsr.models.base_model import BaseModel
+ from basicsr.utils import get_root_logger, imwrite, tensor2img
+ from basicsr.utils.dist_util import get_dist_info
+ 
+ loss_module = importlib.import_module('basicsr.models.losses')
+ metric_module = importlib.import_module('basicsr.metrics')
+ 
+ class ImageRestorationModel(BaseModel):
+     """Base restoration model for single-image deblurring."""
+ 
+     def __init__(self, opt):
+         super(ImageRestorationModel, self).__init__(opt)
+ 
+         # define network
+         self.net_g = define_network(deepcopy(opt['network_g']))
+         self.net_g = self.model_to_device(self.net_g)
+ 
+         # load pretrained models
+         load_path = self.opt['path'].get('pretrain_network_g', None)
+         if load_path is not None:
+             self.load_network(self.net_g, load_path,
+                               self.opt['path'].get('strict_load_g', True),
+                               param_key=self.opt['path'].get('param_key', 'params'))
+ 
+         if self.is_train:
+             self.init_training_settings()
+ 
+         self.scale = int(opt['scale'])
+ 
+     def init_training_settings(self):
+         self.net_g.train()
+         train_opt = self.opt['train']
+ 
+         # define losses
+         if train_opt.get('pixel_opt'):
+             pixel_type = train_opt['pixel_opt'].pop('type')
+             cri_pix_cls = getattr(loss_module, pixel_type)
+             self.cri_pix = cri_pix_cls(**train_opt['pixel_opt']).to(
+                 self.device)
+         else:
+             self.cri_pix = None
+ 
+         if train_opt.get('perceptual_opt'):
+             percep_type = train_opt['perceptual_opt'].pop('type')
+             cri_perceptual_cls = getattr(loss_module, percep_type)
+             self.cri_perceptual = cri_perceptual_cls(
+                 **train_opt['perceptual_opt']).to(self.device)
+         else:
+             self.cri_perceptual = None
+ 
+         if self.cri_pix is None and self.cri_perceptual is None:
+             raise ValueError('Both pixel and perceptual losses are None.')
+ 
+         # set up optimizers and schedulers
+         self.setup_optimizers()
+         self.setup_schedulers()
+ 
+     def setup_optimizers(self):
+         train_opt = self.opt['train']
+         optim_params = []
+ 
+         for k, v in self.net_g.named_parameters():
+             if v.requires_grad:
+                 # if k.startswith('module.offsets') or k.startswith('module.dcns'):
+                 #     optim_params_lowlr.append(v)
+                 # else:
+                 optim_params.append(v)
+             # else:
+             #     logger = get_root_logger()
+             #     logger.warning(f'Params {k} will not be optimized.')
+         # print(optim_params)
+         # ratio = 0.1
+ 
+         optim_type = train_opt['optim_g'].pop('type')
+         if optim_type == 'Adam':
+             self.optimizer_g = torch.optim.Adam([{'params': optim_params}],
+                                                 **train_opt['optim_g'])
+         elif optim_type == 'SGD':
+             self.optimizer_g = torch.optim.SGD(optim_params,
+                                                **train_opt['optim_g'])
+         elif optim_type == 'AdamW':
+             self.optimizer_g = torch.optim.AdamW([{'params': optim_params}],
+                                                  **train_opt['optim_g'])
+         else:
+             raise NotImplementedError(
+                 f'optimizer {optim_type} is not supported yet.')
+         self.optimizers.append(self.optimizer_g)
+ 
+     def feed_data(self, data, is_val=False):
+         self.lq = data['lq'].to(self.device)
+         if 'gt' in data:
+             self.gt = data['gt'].to(self.device)
+ 
+     def grids(self):
+         b, c, h, w = self.gt.size()
+         self.original_size = (b, c, h, w)
+ 
+         assert b == 1
+         if 'crop_size_h' in self.opt['val']:
+             crop_size_h = self.opt['val']['crop_size_h']
+         else:
+             crop_size_h = int(self.opt['val'].get('crop_size_h_ratio') * h)
+ 
+         if 'crop_size_w' in self.opt['val']:
+             crop_size_w = self.opt['val'].get('crop_size_w')
+         else:
+             crop_size_w = int(self.opt['val'].get('crop_size_w_ratio') * w)
+ 
+         crop_size_h, crop_size_w = crop_size_h // self.scale * self.scale, crop_size_w // self.scale * self.scale
+         # adaptive step_i, step_j
+         num_row = (h - 1) // crop_size_h + 1
+         num_col = (w - 1) // crop_size_w + 1
+ 
+         import math
+         step_j = crop_size_w if num_col == 1 else math.ceil((w - crop_size_w) / (num_col - 1) - 1e-8)
+         step_i = crop_size_h if num_row == 1 else math.ceil((h - crop_size_h) / (num_row - 1) - 1e-8)
+ 
+         scale = self.scale
+         step_i = step_i // scale * scale
+         step_j = step_j // scale * scale
+ 
+         parts = []
+         idxes = []
+ 
+         i = 0  # 0~h-1
+         last_i = False
+         while i < h and not last_i:
+             j = 0
+             if i + crop_size_h >= h:
+                 i = h - crop_size_h
+                 last_i = True
+ 
+             last_j = False
+             while j < w and not last_j:
+                 if j + crop_size_w >= w:
+                     j = w - crop_size_w
+                     last_j = True
+                 parts.append(self.lq[:, :, i // scale:(i + crop_size_h) // scale, j // scale:(j + crop_size_w) // scale])
+                 idxes.append({'i': i, 'j': j})
+                 j = j + step_j
+             i = i + step_i
+ 
+         self.origin_lq = self.lq
+         self.lq = torch.cat(parts, dim=0)
+         self.idxes = idxes
+ 
+     def grids_inverse(self):
+         preds = torch.zeros(self.original_size)
+         b, c, h, w = self.original_size
+ 
+         count_mt = torch.zeros((b, 1, h, w))
+         if 'crop_size_h' in self.opt['val']:
+             crop_size_h = self.opt['val']['crop_size_h']
+         else:
+             crop_size_h = int(self.opt['val'].get('crop_size_h_ratio') * h)
+ 
+         if 'crop_size_w' in self.opt['val']:
+             crop_size_w = self.opt['val'].get('crop_size_w')
+         else:
+             crop_size_w = int(self.opt['val'].get('crop_size_w_ratio') * w)
+ 
+         crop_size_h, crop_size_w = crop_size_h // self.scale * self.scale, crop_size_w // self.scale * self.scale
+ 
+         for cnt, each_idx in enumerate(self.idxes):
+             i = each_idx['i']
+             j = each_idx['j']
+             preds[0, :, i:i + crop_size_h, j:j + crop_size_w] += self.outs[cnt]
+             count_mt[0, 0, i:i + crop_size_h, j:j + crop_size_w] += 1.
+ 
+         self.output = (preds / count_mt).to(self.device)
+         self.lq = self.origin_lq
+ 
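Editor's note (not part of the uploaded file) — grids()/grids_inverse() implement overlapping-tile inference; the step arithmetic spaces the crops evenly so the last tile ends exactly on the image border. In plain numbers:

    import math
    h, crop_size_h = 720, 512
    num_row = (h - 1) // crop_size_h + 1                          # 2 rows of tiles
    step_i = math.ceil((h - crop_size_h) / (num_row - 1) - 1e-8)  # 208
    # tiles start at i = 0 and i = 208; 208 + 512 = 720 hits the border,
    # and grids_inverse() averages predictions where the tiles overlap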
+     def optimize_parameters(self, current_iter, tb_logger):
+         self.optimizer_g.zero_grad()
+ 
+         if self.opt['train'].get('mixup', False):
+             self.mixup_aug()
+ 
+         preds = self.net_g(self.lq)
+         if not isinstance(preds, list):
+             preds = [preds]
+ 
+         self.output = preds[-1]
+ 
+         l_total = 0
+         loss_dict = OrderedDict()
+         # pixel loss
+         if self.cri_pix:
+             l_pix = 0.
+             for pred in preds:
+                 l_pix += self.cri_pix(pred, self.gt)
+ 
+             # print('l pix ... ', l_pix)
+             l_total += l_pix
+             loss_dict['l_pix'] = l_pix
+ 
+         # perceptual loss
+         if self.cri_perceptual:
+             l_percep, l_style = self.cri_perceptual(self.output, self.gt)
+ 
+             if l_percep is not None:
+                 l_total += l_percep
+                 loss_dict['l_percep'] = l_percep
+             if l_style is not None:
+                 l_total += l_style
+                 loss_dict['l_style'] = l_style
+ 
+         l_total = l_total + 0. * sum(p.sum() for p in self.net_g.parameters())
+ 
+         l_total.backward()
+         use_grad_clip = self.opt['train'].get('use_grad_clip', True)
+         if use_grad_clip:
+             torch.nn.utils.clip_grad_norm_(self.net_g.parameters(), 0.01)
+         self.optimizer_g.step()
+ 
+         self.log_dict = self.reduce_loss_dict(loss_dict)
+ 
+     def test(self):
+         self.net_g.eval()
+         with torch.no_grad():
+             n = len(self.lq)
+             outs = []
+             m = self.opt['val'].get('max_minibatch', n)
+             i = 0
+             while i < n:
+                 j = i + m
+                 if j >= n:
+                     j = n
+                 pred = self.net_g(self.lq[i:j])
+                 if isinstance(pred, list):
+                     pred = pred[-1]
+                 outs.append(pred.detach().cpu())
+                 i = j
+ 
+             self.output = torch.cat(outs, dim=0)
+         self.net_g.train()
+ 
+     def dist_validation(self, dataloader, current_iter, tb_logger, save_img, rgb2bgr, use_image):
+         dataset_name = dataloader.dataset.opt['name']
+         with_metrics = self.opt['val'].get('metrics') is not None
+         if with_metrics:
+             self.metric_results = {
+                 metric: 0
+                 for metric in self.opt['val']['metrics'].keys()
+             }
+ 
+         rank, world_size = get_dist_info()
+         if rank == 0:
+             pbar = tqdm(total=len(dataloader), unit='image')
+ 
+         cnt = 0
+ 
+         for idx, val_data in enumerate(dataloader):
+             if idx % world_size != rank:
+                 continue
+ 
+             img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0]
+ 
+             self.feed_data(val_data, is_val=True)
+             if self.opt['val'].get('grids', False):
+                 self.grids()
+ 
+             self.test()
+ 
+             if self.opt['val'].get('grids', False):
+                 self.grids_inverse()
+ 
+             visuals = self.get_current_visuals()
+             sr_img = tensor2img([visuals['result']], rgb2bgr=rgb2bgr)
+             if 'gt' in visuals:
+                 gt_img = tensor2img([visuals['gt']], rgb2bgr=rgb2bgr)
+                 del self.gt
+ 
+             # tentative for out of GPU memory
+             del self.lq
+             del self.output
+             torch.cuda.empty_cache()
+ 
+             if save_img:
+                 if sr_img.shape[2] == 6:
+                     L_img = sr_img[:, :, :3]
+                     R_img = sr_img[:, :, 3:]
+ 
+                     # visual_dir = osp.join('visual_results', dataset_name, self.opt['name'])
+                     visual_dir = osp.join(self.opt['path']['visualization'], dataset_name)
+ 
+                     imwrite(L_img, osp.join(visual_dir, f'{img_name}_L.png'))
+                     imwrite(R_img, osp.join(visual_dir, f'{img_name}_R.png'))
+                 else:
+                     if self.opt['is_train']:
+ 
+                         save_img_path = osp.join(self.opt['path']['visualization'],
+                                                  img_name,
+                                                  f'{img_name}_{current_iter}.png')
+ 
+                         save_gt_img_path = osp.join(self.opt['path']['visualization'],
+                                                     img_name,
+                                                     f'{img_name}_{current_iter}_gt.png')
+                     else:
+                         save_img_path = osp.join(
+                             self.opt['path']['visualization'], dataset_name,
+                             f'{img_name}.png')
+                         save_gt_img_path = osp.join(
+                             self.opt['path']['visualization'], dataset_name,
+                             f'{img_name}_gt.png')
+ 
+                     imwrite(sr_img, save_img_path)
+                     imwrite(gt_img, save_gt_img_path)
+ 
+             if with_metrics:
+                 # calculate metrics
+                 opt_metric = deepcopy(self.opt['val']['metrics'])
+                 if use_image:
+                     for name, opt_ in opt_metric.items():
+                         metric_type = opt_.pop('type')
+                         self.metric_results[name] += getattr(
+                             metric_module, metric_type)(sr_img, gt_img, **opt_)
+                 else:
+                     for name, opt_ in opt_metric.items():
+                         metric_type = opt_.pop('type')
+                         self.metric_results[name] += getattr(
+                             metric_module, metric_type)(visuals['result'], visuals['gt'], **opt_)
+ 
+             cnt += 1
+             if rank == 0:
+                 for _ in range(world_size):
+                     pbar.update(1)
+                     pbar.set_description(f'Test {img_name}')
+         if rank == 0:
+             pbar.close()
+ 
+         # current_metric = 0.
+         collected_metrics = OrderedDict()
+         if with_metrics:
+             for metric in self.metric_results.keys():
+                 collected_metrics[metric] = torch.tensor(self.metric_results[metric]).float().to(self.device)
+             collected_metrics['cnt'] = torch.tensor(cnt).float().to(self.device)
+ 
+             self.collected_metrics = collected_metrics
+ 
+         keys = []
+         metrics = []
+         for name, value in self.collected_metrics.items():
+             keys.append(name)
+             metrics.append(value)
+         metrics = torch.stack(metrics, 0)
+         torch.distributed.reduce(metrics, dst=0)
+         if self.opt['rank'] == 0:
+             metrics_dict = {}
+             cnt = 0
+             for key, metric in zip(keys, metrics):
+                 if key == 'cnt':
+                     cnt = float(metric)
+                     continue
+                 metrics_dict[key] = float(metric)
+ 
+             for key in metrics_dict:
+                 metrics_dict[key] /= cnt
+ 
+             self._log_validation_metric_values(current_iter, dataloader.dataset.opt['name'],
+                                                tb_logger, metrics_dict)
+         return 0.
+ 
+     def nondist_validation(self, *args, **kwargs):
+         logger = get_root_logger()
+         logger.warning('nondist_validation is not implemented. Run dist_validation.')
+         self.dist_validation(*args, **kwargs)
+ 
+ 
+     def _log_validation_metric_values(self, current_iter, dataset_name,
+                                       tb_logger, metric_dict):
+         log_str = f'Validation {dataset_name}, \t'
+         for metric, value in metric_dict.items():
+             log_str += f'\t # {metric}: {value:.4f}'
+         logger = get_root_logger()
+         logger.info(log_str)
+ 
+         log_dict = OrderedDict()
+         # for name, value in loss_dict.items():
+         for metric, value in metric_dict.items():
+             log_dict[f'm_{metric}'] = value
+ 
+         self.log_dict = log_dict
+ 
+     def get_current_visuals(self):
+         out_dict = OrderedDict()
+         out_dict['lq'] = self.lq.detach().cpu()
+         out_dict['result'] = self.output.detach().cpu()
+         if hasattr(self, 'gt'):
+             out_dict['gt'] = self.gt.detach().cpu()
+         return out_dict
+ 
+     def save(self, epoch, current_iter):
+         self.save_network(self.net_g, 'net_g', current_iter)
+         self.save_training_state(epoch, current_iter)
NAFNet/basicsr/models/losses/__init__.py ADDED
@@ -0,0 +1,11 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+ from .losses import (L1Loss, MSELoss, PSNRLoss)
+ 
+ __all__ = [
+     'L1Loss', 'MSELoss', 'PSNRLoss',
+ ]
NAFNet/basicsr/models/losses/loss_util.py ADDED
@@ -0,0 +1,101 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+ import functools
+ from torch.nn import functional as F
+ 
+ 
+ def reduce_loss(loss, reduction):
+     """Reduce loss as specified.
+ 
+     Args:
+         loss (Tensor): Elementwise loss tensor.
+         reduction (str): Options are 'none', 'mean' and 'sum'.
+ 
+     Returns:
+         Tensor: Reduced loss tensor.
+     """
+     reduction_enum = F._Reduction.get_enum(reduction)
+     # none: 0, elementwise_mean: 1, sum: 2
+     if reduction_enum == 0:
+         return loss
+     elif reduction_enum == 1:
+         return loss.mean()
+     else:
+         return loss.sum()
+ 
+ 
+ def weight_reduce_loss(loss, weight=None, reduction='mean'):
+     """Apply element-wise weight and reduce loss.
+ 
+     Args:
+         loss (Tensor): Element-wise loss.
+         weight (Tensor): Element-wise weights. Default: None.
+         reduction (str): Same as built-in losses of PyTorch. Options are
+             'none', 'mean' and 'sum'. Default: 'mean'.
+ 
+     Returns:
+         Tensor: Loss values.
+     """
+     # if weight is specified, apply element-wise weight
+     if weight is not None:
+         assert weight.dim() == loss.dim()
+         assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
+         loss = loss * weight
+ 
+     # if weight is not specified or reduction is sum, just reduce the loss
+     if weight is None or reduction == 'sum':
+         loss = reduce_loss(loss, reduction)
+     # if reduction is mean, then compute mean over weight region
+     elif reduction == 'mean':
+         if weight.size(1) > 1:
+             weight = weight.sum()
+         else:
+             weight = weight.sum() * loss.size(1)
+         loss = loss.sum() / weight
+ 
+     return loss
+ 
+ 
+ def weighted_loss(loss_func):
+     """Create a weighted version of a given loss function.
+ 
+     To use this decorator, the loss function must have the signature like
+     `loss_func(pred, target, **kwargs)`. The function only needs to compute
+     element-wise loss without any reduction. This decorator will add weight
+     and reduction arguments to the function. The decorated function will have
+     the signature like `loss_func(pred, target, weight=None, reduction='mean',
+     **kwargs)`.
+ 
+     :Example:
+ 
+     >>> import torch
+     >>> @weighted_loss
+     >>> def l1_loss(pred, target):
+     >>>     return (pred - target).abs()
+ 
+     >>> pred = torch.Tensor([0, 2, 3])
+     >>> target = torch.Tensor([1, 1, 1])
+     >>> weight = torch.Tensor([1, 0, 1])
+ 
+     >>> l1_loss(pred, target)
+     tensor(1.3333)
+     >>> l1_loss(pred, target, weight)
+     tensor(1.5000)
+     >>> l1_loss(pred, target, reduction='none')
+     tensor([1., 1., 2.])
+     >>> l1_loss(pred, target, weight, reduction='sum')
+     tensor(3.)
+     """
+ 
+     @functools.wraps(loss_func)
+     def wrapper(pred, target, weight=None, reduction='mean', **kwargs):
+         # get element-wise loss
+         loss = loss_func(pred, target, **kwargs)
+         loss = weight_reduce_loss(loss, weight, reduction)
+         return loss
+ 
+     return wrapper
NAFNet/basicsr/models/losses/losses.py ADDED
@@ -0,0 +1,116 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+ import torch
+ from torch import nn as nn
+ from torch.nn import functional as F
+ import numpy as np
+ 
+ from basicsr.models.losses.loss_util import weighted_loss
+ 
+ _reduction_modes = ['none', 'mean', 'sum']
+ 
+ 
+ @weighted_loss
+ def l1_loss(pred, target):
+     return F.l1_loss(pred, target, reduction='none')
+ 
+ 
+ @weighted_loss
+ def mse_loss(pred, target):
+     return F.mse_loss(pred, target, reduction='none')
+ 
+ 
+ # @weighted_loss
+ # def charbonnier_loss(pred, target, eps=1e-12):
+ #     return torch.sqrt((pred - target)**2 + eps)
+ 
+ 
+ class L1Loss(nn.Module):
+     """L1 (mean absolute error, MAE) loss.
+ 
+     Args:
+         loss_weight (float): Loss weight for L1 loss. Default: 1.0.
+         reduction (str): Specifies the reduction to apply to the output.
+             Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
+     """
+ 
+     def __init__(self, loss_weight=1.0, reduction='mean'):
+         super(L1Loss, self).__init__()
+         if reduction not in ['none', 'mean', 'sum']:
+             raise ValueError(f'Unsupported reduction mode: {reduction}. '
+                              f'Supported ones are: {_reduction_modes}')
+ 
+         self.loss_weight = loss_weight
+         self.reduction = reduction
+ 
+     def forward(self, pred, target, weight=None, **kwargs):
+         """
+         Args:
+             pred (Tensor): of shape (N, C, H, W). Predicted tensor.
+             target (Tensor): of shape (N, C, H, W). Ground truth tensor.
+             weight (Tensor, optional): of shape (N, C, H, W). Element-wise
+                 weights. Default: None.
+         """
+         return self.loss_weight * l1_loss(
+             pred, target, weight, reduction=self.reduction)
+ 
+ 
+ class MSELoss(nn.Module):
+     """MSE (L2) loss.
+ 
+     Args:
+         loss_weight (float): Loss weight for MSE loss. Default: 1.0.
+         reduction (str): Specifies the reduction to apply to the output.
+             Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
+     """
+ 
+     def __init__(self, loss_weight=1.0, reduction='mean'):
+         super(MSELoss, self).__init__()
+         if reduction not in ['none', 'mean', 'sum']:
+             raise ValueError(f'Unsupported reduction mode: {reduction}. '
+                              f'Supported ones are: {_reduction_modes}')
+ 
+         self.loss_weight = loss_weight
+         self.reduction = reduction
+ 
+     def forward(self, pred, target, weight=None, **kwargs):
+         """
+         Args:
+             pred (Tensor): of shape (N, C, H, W). Predicted tensor.
+             target (Tensor): of shape (N, C, H, W). Ground truth tensor.
+             weight (Tensor, optional): of shape (N, C, H, W). Element-wise
+                 weights. Default: None.
+         """
+         return self.loss_weight * mse_loss(
+             pred, target, weight, reduction=self.reduction)
+ 
+ class PSNRLoss(nn.Module):
+ 
+     def __init__(self, loss_weight=1.0, reduction='mean', toY=False):
+         super(PSNRLoss, self).__init__()
+         assert reduction == 'mean'
+         self.loss_weight = loss_weight
+         self.scale = 10 / np.log(10)
+         self.toY = toY
+         self.coef = torch.tensor([65.481, 128.553, 24.966]).reshape(1, 3, 1, 1)
+         self.first = True
+ 
+     def forward(self, pred, target):
+         assert len(pred.size()) == 4
+         if self.toY:
+             if self.first:
+                 # move the RGB->Y coefficients to the input device once
+                 self.coef = self.coef.to(pred.device)
+                 self.first = False
+ 
+             pred = (pred * self.coef).sum(dim=1).unsqueeze(dim=1) + 16.
+             target = (target * self.coef).sum(dim=1).unsqueeze(dim=1) + 16.
+ 
+             pred, target = pred / 255., target / 255.
+ 
+         return self.loss_weight * self.scale * torch.log(((pred - target) ** 2).mean(dim=(1, 2, 3)) + 1e-8).mean()
+ 
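Editor's note (not part of the uploaded file) — with loss_weight=1 and inputs in [0, 1], PSNRLoss is the negative PSNR: scale = 10 / ln(10) turns the natural log of the per-image MSE into 10 * log10(MSE). A quick numeric check:

    pred = torch.full((1, 3, 4, 4), 0.5)
    target = torch.zeros(1, 3, 4, 4)
    loss = PSNRLoss()(pred, target)
    # MSE = 0.25, 10 * log10(0.25) = -6.02 -> loss ~ -6.02, i.e. PSNR ~ 6.02 dB,
    # so minimizing this loss maximizes PSNR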
NAFNet/basicsr/models/lr_scheduler.py ADDED
@@ -0,0 +1,189 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+ import math
+ from collections import Counter
+ from torch.optim.lr_scheduler import _LRScheduler
+ 
+ 
+ class MultiStepRestartLR(_LRScheduler):
+     """MultiStep with restarts learning rate scheme.
+ 
+     Args:
+         optimizer (torch.nn.optimizer): Torch optimizer.
+         milestones (list): Iterations that will decrease learning rate.
+         gamma (float): Decrease ratio. Default: 0.1.
+         restarts (list): Restart iterations. Default: [0].
+         restart_weights (list): Restart weights at each restart iteration.
+             Default: [1].
+         last_epoch (int): Used in _LRScheduler. Default: -1.
+     """
+ 
+     def __init__(self,
+                  optimizer,
+                  milestones,
+                  gamma=0.1,
+                  restarts=(0, ),
+                  restart_weights=(1, ),
+                  last_epoch=-1):
+         self.milestones = Counter(milestones)
+         self.gamma = gamma
+         self.restarts = restarts
+         self.restart_weights = restart_weights
+         assert len(self.restarts) == len(
+             self.restart_weights), 'restarts and their weights do not match.'
+         super(MultiStepRestartLR, self).__init__(optimizer, last_epoch)
+ 
+     def get_lr(self):
+         if self.last_epoch in self.restarts:
+             weight = self.restart_weights[self.restarts.index(self.last_epoch)]
+             return [
+                 group['initial_lr'] * weight
+                 for group in self.optimizer.param_groups
+             ]
+         if self.last_epoch not in self.milestones:
+             return [group['lr'] for group in self.optimizer.param_groups]
+         return [
+             group['lr'] * self.gamma**self.milestones[self.last_epoch]
+             for group in self.optimizer.param_groups
+         ]
+ 
+ class LinearLR(_LRScheduler):
+     """Linearly decay the learning rate to zero over the whole run.
+ 
+     Args:
+         optimizer (torch.nn.optimizer): Torch optimizer.
+         total_iter (int): Total number of training iterations.
+         last_epoch (int): Used in _LRScheduler. Default: -1.
+     """
+ 
+     def __init__(self,
+                  optimizer,
+                  total_iter,
+                  last_epoch=-1):
+         self.total_iter = total_iter
+         super(LinearLR, self).__init__(optimizer, last_epoch)
+ 
+     def get_lr(self):
+         process = self.last_epoch / self.total_iter
+         weight = (1 - process)
+         # print('get lr ', [weight * group['initial_lr'] for group in self.optimizer.param_groups])
+         return [weight * group['initial_lr'] for group in self.optimizer.param_groups]
+ 
+ 
+ class VibrateLR(_LRScheduler):
+     """Triangle-wave ('vibrating') learning rate with a decaying envelope.
+ 
+     Args:
+         optimizer (torch.nn.optimizer): Torch optimizer.
+         total_iter (int): Total number of training iterations.
+         last_epoch (int): Used in _LRScheduler. Default: -1.
+     """
+ 
+     def __init__(self,
+                  optimizer,
+                  total_iter,
+                  last_epoch=-1):
+         self.total_iter = total_iter
+         super(VibrateLR, self).__init__(optimizer, last_epoch)
+ 
+     def get_lr(self):
+         process = self.last_epoch / self.total_iter
+ 
+         f = 0.1
+         if process < 3 / 8:
+             f = 1 - process * 8 / 3
+         elif process < 5 / 8:
+             f = 0.2
+ 
+         T = self.total_iter // 80
+         Th = T // 2
+ 
+         t = self.last_epoch % T
+ 
+         f2 = t / Th
+         if t >= Th:
+             f2 = 2 - f2
+ 
+         weight = f * f2
+ 
+         if self.last_epoch < Th:
+             weight = max(0.1, weight)
+ 
+         # print('f {}, T {}, Th {}, t {}, f2 {}'.format(f, T, Th, t, f2))
+         return [weight * group['initial_lr'] for group in self.optimizer.param_groups]
+ 
+ def get_position_from_periods(iteration, cumulative_period):
+     """Get the position from a period list.
+ 
+     It will return the index of the right-closest number in the period list.
+     For example, if cumulative_period = [100, 200, 300, 400]:
+         if iteration == 50, return 0;
+         if iteration == 210, return 2;
+         if iteration == 300, return 2.
+ 
+     Args:
+         iteration (int): Current iteration.
+         cumulative_period (list[int]): Cumulative period list.
+ 
+     Returns:
+         int: The position of the right-closest number in the period list.
+     """
+     for i, period in enumerate(cumulative_period):
+         if iteration <= period:
+             return i
+ 
+ class CosineAnnealingRestartLR(_LRScheduler):
+     """Cosine annealing with restarts learning rate scheme.
+ 
+     An example config:
+         periods = [10, 10, 10, 10]
+         restart_weights = [1, 0.5, 0.5, 0.5]
+         eta_min = 1e-7
+ 
+     It has four cycles, each with 10 iterations. At the 10th, 20th and 30th
+     iterations, the scheduler will restart with the weights in
+     restart_weights.
+ 
+     Args:
+         optimizer (torch.nn.optimizer): Torch optimizer.
+         periods (list): Period for each cosine annealing cycle.
+         restart_weights (list): Restart weights at each restart iteration.
+             Default: [1].
+         eta_min (float): The minimum lr. Default: 0.
+         last_epoch (int): Used in _LRScheduler. Default: -1.
+     """
+ 
+     def __init__(self,
+                  optimizer,
+                  periods,
+                  restart_weights=(1, ),
+                  eta_min=0,
+                  last_epoch=-1):
+         self.periods = periods
+         self.restart_weights = restart_weights
+         self.eta_min = eta_min
+         assert (len(self.periods) == len(self.restart_weights)
+                 ), 'periods and restart_weights should have the same length.'
+         self.cumulative_period = [
+             sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))
+         ]
+         super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch)
+ 
+     def get_lr(self):
+         idx = get_position_from_periods(self.last_epoch,
+                                         self.cumulative_period)
+         current_weight = self.restart_weights[idx]
+         nearest_restart = 0 if idx == 0 else self.cumulative_period[idx - 1]
+         current_period = self.periods[idx]
+ 
+         return [
+             self.eta_min + current_weight * 0.5 * (base_lr - self.eta_min) *
+             (1 + math.cos(math.pi * (
+                 (self.last_epoch - nearest_restart) / current_period)))
+             for base_lr in self.base_lrs
+         ]
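Editor's note (not part of the uploaded file) — a short sketch of the restart behaviour, stepping once per iteration the way BaseModel.update_learning_rate does (assumes import torch; import torch.nn as nn, which this module does not import itself):

    net = nn.Linear(4, 4)
    optim = torch.optim.Adam(net.parameters(), lr=1e-3)
    sched = CosineAnnealingRestartLR(
        optim, periods=[10, 10], restart_weights=[1, 0.5], eta_min=1e-7)
    for _ in range(20):
        optim.step()
        sched.step()
    # lr anneals 1e-3 -> ~1e-7 over the first 10 steps, then restarts near 5e-4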
NAFNet/basicsr/test.py ADDED
@@ -0,0 +1,70 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+ import logging
+ import torch
+ from os import path as osp
+ 
+ from basicsr.data import create_dataloader, create_dataset
+ from basicsr.models import create_model
+ from basicsr.train import parse_options
+ from basicsr.utils import (get_env_info, get_root_logger, get_time_str,
+                            make_exp_dirs)
+ from basicsr.utils.options import dict2str
+ 
+ 
+ def main():
+     # parse options, set distributed setting, set random seed
+     opt = parse_options(is_train=False)
+ 
+     torch.backends.cudnn.benchmark = True
+     # torch.backends.cudnn.deterministic = True
+ 
+     # mkdir and initialize loggers
+     make_exp_dirs(opt)
+     log_file = osp.join(opt['path']['log'],
+                         f"test_{opt['name']}_{get_time_str()}.log")
+     logger = get_root_logger(
+         logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
+     logger.info(get_env_info())
+     logger.info(dict2str(opt))
+ 
+     # create test dataset and dataloader
+     test_loaders = []
+     for phase, dataset_opt in sorted(opt['datasets'].items()):
+         if 'test' in phase:
+             dataset_opt['phase'] = 'test'
+             test_set = create_dataset(dataset_opt)
+             test_loader = create_dataloader(
+                 test_set,
+                 dataset_opt,
+                 num_gpu=opt['num_gpu'],
+                 dist=opt['dist'],
+                 sampler=None,
+                 seed=opt['manual_seed'])
+             logger.info(
+                 f"Number of test images in {dataset_opt['name']}: {len(test_set)}")
+             test_loaders.append(test_loader)
+ 
+     # create model
+     model = create_model(opt)
+ 
+     for test_loader in test_loaders:
+         test_set_name = test_loader.dataset.opt['name']
+         logger.info(f'Testing {test_set_name}...')
+         rgb2bgr = opt['val'].get('rgb2bgr', True)
+         # whether to use uint8 images to compute metrics
+         use_image = opt['val'].get('use_image', True)
+         model.validation(
+             test_loader,
+             current_iter=opt['name'],
+             tb_logger=None,
+             save_img=opt['val']['save_img'],
+             rgb2bgr=rgb2bgr, use_image=use_image)
+ 
+ 
+ if __name__ == '__main__':
+     main()
NAFNet/basicsr/train.py ADDED
@@ -0,0 +1,305 @@
+ # ------------------------------------------------------------------------
+ # Copyright (c) 2022 megvii-model. All Rights Reserved.
+ # ------------------------------------------------------------------------
+ # Modified from BasicSR (https://github.com/xinntao/BasicSR)
+ # Copyright 2018-2020 BasicSR Authors
+ # ------------------------------------------------------------------------
+ import argparse
+ import datetime
+ import logging
+ import math
+ import os
+ import random
+ import time
+ import torch
+ from os import path as osp
+
+ from basicsr.data import create_dataloader, create_dataset
+ from basicsr.data.data_sampler import EnlargedSampler
+ from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
+ from basicsr.models import create_model
+ from basicsr.utils import (MessageLogger, check_resume, get_env_info,
+                            get_root_logger, get_time_str, init_tb_logger,
+                            init_wandb_logger, make_exp_dirs, mkdir_and_rename,
+                            set_random_seed)
+ from basicsr.utils.dist_util import get_dist_info, init_dist
+ from basicsr.utils.options import dict2str, parse
+
+
+ def parse_options(is_train=True):
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         '-opt', type=str, required=True, help='Path to option YAML file.')
+     parser.add_argument(
+         '--launcher',
+         choices=['none', 'pytorch', 'slurm'],
+         default='none',
+         help='job launcher')
+     parser.add_argument('--local_rank', type=int, default=0)
+
+     parser.add_argument('--input_path', type=str, required=False,
+                         help='The path to the input image. For single image inference only.')
+     parser.add_argument('--output_path', type=str, required=False,
+                         help='The path to the output image. For single image inference only.')
+
+     args = parser.parse_args()
+     opt = parse(args.opt, is_train=is_train)
+
+     # distributed settings
+     if args.launcher == 'none':
+         opt['dist'] = False
+         print('Disable distributed.', flush=True)
+     else:
+         opt['dist'] = True
+         if args.launcher == 'slurm' and 'dist_params' in opt:
+             init_dist(args.launcher, **opt['dist_params'])
+         else:
+             init_dist(args.launcher)
+             print('init dist ..', args.launcher)
+
+     opt['rank'], opt['world_size'] = get_dist_info()
+
+     # random seed
+     seed = opt.get('manual_seed')
+     if seed is None:
+         seed = random.randint(1, 10000)
+         opt['manual_seed'] = seed
+     set_random_seed(seed + opt['rank'])
+
+     if args.input_path is not None and args.output_path is not None:
+         opt['img_path'] = {
+             'input_img': args.input_path,
+             'output_img': args.output_path
+         }
+
+     return opt
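+ # Illustrative usage of the flags above (paths are placeholders). When both
+ # --input_path and --output_path are given, they land in opt['img_path'] for
+ # single-image inference, e.g. assuming a script such as basicsr/demo.py
+ # (included in this upload) reuses this parse_options:
+ #
+ #   python basicsr/demo.py -opt options/test/<model>.yml \
+ #       --input_path ./input.png --output_path ./output.png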
+
+
+ def init_loggers(opt):
+     log_file = osp.join(opt['path']['log'],
+                         f"train_{opt['name']}_{get_time_str()}.log")
+     logger = get_root_logger(
+         logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
+     logger.info(get_env_info())
+     logger.info(dict2str(opt))
+
+     # initialize wandb logger before tensorboard logger to allow proper sync:
+     if (opt['logger'].get('wandb')
+             is not None) and (opt['logger']['wandb'].get('project')
+                               is not None) and ('debug' not in opt['name']):
+         assert opt['logger'].get('use_tb_logger') is True, (
+             'should turn on tensorboard when using wandb')
+         init_wandb_logger(opt)
+     tb_logger = None
+     if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']:
+         tb_logger = init_tb_logger(log_dir=osp.join('logs', opt['name']))
+     return logger, tb_logger
+
+
+ def create_train_val_dataloader(opt, logger):
+     # create train and val dataloaders
+     train_loader, val_loader = None, None
+     for phase, dataset_opt in opt['datasets'].items():
+         if phase == 'train':
+             dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
+             train_set = create_dataset(dataset_opt)
+             train_sampler = EnlargedSampler(train_set, opt['world_size'],
+                                             opt['rank'], dataset_enlarge_ratio)
+             train_loader = create_dataloader(
+                 train_set,
+                 dataset_opt,
+                 num_gpu=opt['num_gpu'],
+                 dist=opt['dist'],
+                 sampler=train_sampler,
+                 seed=opt['manual_seed'])
+
+             num_iter_per_epoch = math.ceil(
+                 len(train_set) * dataset_enlarge_ratio /
+                 (dataset_opt['batch_size_per_gpu'] * opt['world_size']))
+             total_iters = int(opt['train']['total_iter'])
+             total_epochs = math.ceil(total_iters / num_iter_per_epoch)
+             logger.info(
+                 'Training statistics:'
+                 f'\n\tNumber of train images: {len(train_set)}'
+                 f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
+                 f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
+                 f'\n\tWorld size (gpu number): {opt["world_size"]}'
+                 f'\n\tRequired iters per epoch: {num_iter_per_epoch}'
+                 f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')
+
+         elif phase == 'val':
+             val_set = create_dataset(dataset_opt)
+             val_loader = create_dataloader(
+                 val_set,
+                 dataset_opt,
+                 num_gpu=opt['num_gpu'],
+                 dist=opt['dist'],
+                 sampler=None,
+                 seed=opt['manual_seed'])
+             logger.info(
+                 f'Number of val images/folders in {dataset_opt["name"]}: '
+                 f'{len(val_set)}')
+         else:
+             raise ValueError(f'Dataset phase {phase} is not recognized.')
+
+     return train_loader, train_sampler, val_loader, total_epochs, total_iters
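+ # Worked example for the bookkeeping above (illustrative numbers): with
+ # 3000 train images, dataset_enlarge_ratio=10, batch_size_per_gpu=8 and
+ # world_size=4, num_iter_per_epoch = ceil(3000 * 10 / (8 * 4)) = 938;
+ # with total_iter=200000, total_epochs = ceil(200000 / 938) = 214.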
+
+
+ def main():
+     # parse options, set distributed setting, set random seed
+     opt = parse_options(is_train=True)
+
+     torch.backends.cudnn.benchmark = True
+     # torch.backends.cudnn.deterministic = True
+
+     # automatic resume: pick up the latest saved training state, if any
+     state_folder_path = 'experiments/{}/training_states/'.format(opt['name'])
+     try:
+         states = os.listdir(state_folder_path)
+     except FileNotFoundError:
+         states = []
+
+     resume_state = None
+     if len(states) > 0:
+         print('Found training states:', states, 'in', state_folder_path)
+         max_state_file = '{}.state'.format(max([int(x[0:-6]) for x in states]))
+         resume_state = os.path.join(state_folder_path, max_state_file)
+         opt['path']['resume_state'] = resume_state
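+     # Worked example (hypothetical file names): with
+     # states == ['5000.state', '10000.state'], int(x[0:-6]) strips the
+     # six-character '.state' suffix, max(...) yields 10000, and training
+     # resumes from experiments/<name>/training_states/10000.state.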
+
+     # load resume states if necessary
+     if opt['path'].get('resume_state'):
+         device_id = torch.cuda.current_device()
+         resume_state = torch.load(
+             opt['path']['resume_state'],
+             map_location=lambda storage, loc: storage.cuda(device_id))
+     else:
+         resume_state = None
+
+     # mkdir for experiments and logger
+     if resume_state is None:
+         make_exp_dirs(opt)
+         if opt['logger'].get('use_tb_logger') and 'debug' not in opt[
+                 'name'] and opt['rank'] == 0:
+             mkdir_and_rename(osp.join('tb_logger', opt['name']))
+
+     # initialize loggers
+     logger, tb_logger = init_loggers(opt)
+
+     # create train and validation dataloaders
+     result = create_train_val_dataloader(opt, logger)
+     train_loader, train_sampler, val_loader, total_epochs, total_iters = result
+
+     # create model
+     if resume_state:  # resume training
+         check_resume(opt, resume_state['iter'])
+         model = create_model(opt)
+         model.resume_training(resume_state)  # handle optimizers and schedulers
+         logger.info(f"Resuming training from epoch: {resume_state['epoch']}, "
+                     f"iter: {resume_state['iter']}.")
+         start_epoch = resume_state['epoch']
+         current_iter = resume_state['iter']
+     else:
+         model = create_model(opt)
+         start_epoch = 0
+         current_iter = 0
+
+     # create message logger (formatted outputs)
+     msg_logger = MessageLogger(opt, current_iter, tb_logger)
+
+     # dataloader prefetcher
+     prefetch_mode = opt['datasets']['train'].get('prefetch_mode')
+     if prefetch_mode is None or prefetch_mode == 'cpu':
+         prefetcher = CPUPrefetcher(train_loader)
+     elif prefetch_mode == 'cuda':
+         prefetcher = CUDAPrefetcher(train_loader, opt)
+         logger.info(f'Use {prefetch_mode} prefetch dataloader')
+         if opt['datasets']['train'].get('pin_memory') is not True:
+             raise ValueError('Please set pin_memory=True for CUDAPrefetcher.')
+     else:
+         raise ValueError(f'Wrong prefetch_mode {prefetch_mode}. '
+                          "Supported ones are: None, 'cuda', 'cpu'.")
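+     # Illustrative YAML for the prefetcher choice above (keys match the
+     # lookups in this block; values are examples, not defaults):
+     #
+     #   datasets:
+     #     train:
+     #       prefetch_mode: cuda  # or 'cpu'; omit to fall back to CPUPrefetcher
+     #       pin_memory: true     # required by CUDAPrefetcher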
+
+     # training
+     logger.info(
+         f'Start training from epoch: {start_epoch}, iter: {current_iter}')
+     data_time, iter_time = time.time(), time.time()
+     start_time = time.time()
+
+     # for epoch in range(start_epoch, total_epochs + 1):
+     epoch = start_epoch
+     while current_iter <= total_iters:
+         train_sampler.set_epoch(epoch)
+         prefetcher.reset()
+         train_data = prefetcher.next()
+
+         while train_data is not None:
+             data_time = time.time() - data_time
+
+             current_iter += 1
+             if current_iter > total_iters:
+                 break
+             # update learning rate
+             model.update_learning_rate(
+                 current_iter, warmup_iter=opt['train'].get('warmup_iter', -1))
+             # training
+             model.feed_data(train_data, is_val=False)
+             result_code = model.optimize_parameters(current_iter, tb_logger)
+             # if result_code == -1 and tb_logger:
+             #     print('loss explode .. ')
+             #     exit(0)
+             iter_time = time.time() - iter_time
+             # log
+             if current_iter % opt['logger']['print_freq'] == 0:
+                 log_vars = {'epoch': epoch, 'iter': current_iter,
+                             'total_iter': total_iters}
+                 log_vars.update({'lrs': model.get_current_learning_rate()})
+                 log_vars.update({'time': iter_time, 'data_time': data_time})
+                 log_vars.update(model.get_current_log())
+                 msg_logger(log_vars)
+
+             # save models and training states
+             if current_iter % opt['logger']['save_checkpoint_freq'] == 0:
+                 logger.info('Saving models and training states.')
+                 model.save(epoch, current_iter)
+
+             # validation
+             if opt.get('val') is not None and (
+                     current_iter % opt['val']['val_freq'] == 0
+                     or current_iter == 1000):
+                 rgb2bgr = opt['val'].get('rgb2bgr', True)
+                 # whether to use uint8 images to compute metrics
+                 use_image = opt['val'].get('use_image', True)
+                 model.validation(val_loader, current_iter, tb_logger,
+                                  opt['val']['save_img'], rgb2bgr, use_image)
+                 log_vars = {'epoch': epoch, 'iter': current_iter,
+                             'total_iter': total_iters}
+                 log_vars.update({'lrs': model.get_current_learning_rate()})
+                 log_vars.update(model.get_current_log())
+                 msg_logger(log_vars)
+
+             data_time = time.time()
+             iter_time = time.time()
+             train_data = prefetcher.next()
+         # end of iter
+         epoch += 1
+
+     # end of epoch
+
+     consumed_time = str(
+         datetime.timedelta(seconds=int(time.time() - start_time)))
+     logger.info(f'End of training. Time consumed: {consumed_time}')
+     logger.info('Save the latest model.')
+     model.save(epoch=-1, current_iter=-1)  # -1 stands for the latest
+     if opt.get('val') is not None:
+         rgb2bgr = opt['val'].get('rgb2bgr', True)
+         use_image = opt['val'].get('use_image', True)
+         model.validation(val_loader, current_iter, tb_logger,
+                          opt['val']['save_img'], rgb2bgr, use_image)
+     if tb_logger:
+         tb_logger.close()
+
+
+ if __name__ == '__main__':
+     # Workaround: some gRPC builds hang with the default polling strategy,
+     # so force 'epoll1' before anything initializes gRPC.
+     os.environ['GRPC_POLL_STRATEGY'] = 'epoll1'
+     main()
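+ # Illustrative launch commands (option-file paths are placeholders):
+ #
+ #   single GPU:
+ #     python basicsr/train.py -opt options/train/<dataset>/<model>.yml
+ #
+ #   multiple GPUs via the PyTorch launcher, which supplies --local_rank:
+ #     python -m torch.distributed.launch --nproc_per_node=8 --master_port=4321 \
+ #         basicsr/train.py -opt options/train/<dataset>/<model>.yml --launcher pytorch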