Hamnivore committed 17b66e9 (1 parent: d7c6feb)

undid changes to the requirements

requirements.txt CHANGED
@@ -1,7 +1,7 @@
- --index-url https://download.pytorch.org/whl/cpu
- torch==2.2.2
- torchvision==0.17.2
- torchaudio==2.2.2
+ --extra-index-url https://download.pytorch.org/whl/cu121
+ torch==2.5.1
+ torchvision==0.20.1
+ torchaudio==2.5.1
  albumentations==0.4.3
  opencv-python-headless==4.9.0.80
  pudb==2019.2
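
Since the requirements now pull the cu121 wheels via --extra-index-url, a quick sanity check after pip install -r requirements.txt is to confirm that the CUDA build of torch actually resolved. A minimal sketch (the exact version suffix depends on the wheel pip picks):

# Verify that the CUDA-enabled PyTorch wheel was installed and a GPU is visible.
import torch

print(torch.__version__)           # e.g. 2.5.1+cu121
print(torch.cuda.is_available())   # True on a machine with a working CUDA driver
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))
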
weights/albedo/configs/albedo_project.yaml CHANGED
@@ -1,3 +1,109 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c8703f33e53258c5597934128f94ae01c4e2c7c0dce84e3c90028ff32fd10719
- size 2526
+ model:
+   base_learning_rate: 0.0001
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.012
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image_target
+     cond_stage_key: image_cond
+     image_size: 32
+     channels: 4
+     cond_stage_trainable: false
+     conditioning_key: hybrid
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     scheduler_config:
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps:
+         - 100
+         cycle_lengths:
+         - 10000000000000
+         f_start:
+         - 1.0e-06
+         f_max:
+         - 1.0
+         f_min:
+         - 1.0
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32
+         in_channels: 8
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions:
+         - 4
+         - 2
+         - 1
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 4
+         - 4
+         num_heads: 8
+         use_spatial_transformer: true
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: true
+         legacy: false
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPImageEmbedder
+ data:
+   target: ldm.data.simple.ObjaverseDataModuleFromConfig
+   params:
+     target_name: albedo
+     root_dir: data/objaverse_rendering/samll-dataset
+     batch_size: 128
+     num_workers: 16
+     tar_config:
+       list_dir: data/big_data_lists
+       tar_dir: data/big_data
+       img_per_obj: 10
+       objaverse_data_list:
+         image_list_cache_path: image_lists/64000_10_image_list.npz
+         obj_starts:
+         - 0
+         - 5000
+         - 15000
+         obj_ends:
+         - 2000
+         - 7000
+         - 17000
+         num_envs: 50
+         num_imgs: 1
+     train:
+       validation: false
+       image_transforms:
+         size: 256
+     validation:
+       validation: true
+       image_transforms:
+         size: 256
+     use_wds: true
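
Configs in this layout are normally consumed with OmegaConf together with the latent-diffusion instantiate_from_config helper. The following is a minimal sketch under the assumption that this repo follows the standard ldm convention (ldm.util.instantiate_from_config building each object from its target/params pair):

# Sketch: load the albedo config and instantiate the model and data module from it
# (assumes the standard latent-diffusion helpers are present in this repo).
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("weights/albedo/configs/albedo_project.yaml")
model = instantiate_from_config(config.model)  # LatentDiffusion built from model.target / model.params
data = instantiate_from_config(config.data)    # ObjaverseDataModuleFromConfig built the same way
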
weights/specular/configs/specular_project.yaml CHANGED
@@ -1,3 +1,109 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7f1b8dc5694612815974312d64403afcdad9c089b350ab6701b5bb1e3149f8e8
- size 2532
+ model:
+   base_learning_rate: 0.0001
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.012
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: image_target
+     cond_stage_key: image_cond
+     image_size: 32
+     channels: 4
+     cond_stage_trainable: false
+     conditioning_key: hybrid
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     scheduler_config:
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps:
+         - 100
+         cycle_lengths:
+         - 10000000000000
+         f_start:
+         - 1.0e-06
+         f_max:
+         - 1.0
+         f_min:
+         - 1.0
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32
+         in_channels: 8
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions:
+         - 4
+         - 2
+         - 1
+         num_res_blocks: 2
+         channel_mult:
+         - 1
+         - 2
+         - 4
+         - 4
+         num_heads: 8
+         use_spatial_transformer: true
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: true
+         legacy: false
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPImageEmbedder
+ data:
+   target: ldm.data.simple.ObjaverseDataModuleFromConfig
+   params:
+     target_name: gloss_shaded
+     root_dir: data/objaverse_rendering/samll-dataset
+     batch_size: 128
+     num_workers: 16
+     tar_config:
+       list_dir: data/big_data_lists
+       tar_dir: data/big_data
+       img_per_obj: 10
+       objaverse_data_list:
+         image_list_cache_path: image_lists/64000_10_image_list.npz
+         obj_starts:
+         - 0
+         - 5000
+         - 15000
+         obj_ends:
+         - 2000
+         - 7000
+         - 17000
+         num_envs: 50
+         num_imgs: 1
+     train:
+       validation: false
+       image_transforms:
+         size: 256
+     validation:
+       validation: true
+       image_transforms:
+         size: 256
+     use_wds: true