Upload 4 files
wt_mapper_70k_sd2_idloss/checkpoints/embeddings_gs-139999.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b96b78f153f53fba4e8e7d2e66ad2a9ff0d78616931380f7c8bb63785567fc5e
+size 153224563
wt_mapper_70k_sd2_idloss/checkpoints/last.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d030ad8db708280fcae77d87e973102039acd23a11bdecc3db8eb6c0ac940ee1
+size 431
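
Note that both checkpoint files above are stored through Git LFS, so the diff shows only pointer files (spec version, object hash, byte size): the embeddings checkpoint resolves to roughly 153 MB, while last.ckpt resolves to a 431-byte object. A minimal sketch of materializing the LFS objects and inspecting the embeddings checkpoint, assuming a local clone with git-lfs installed and a standard torch-serialized payload (the internal key layout is not shown in this diff):

```python
# Sketch: fetch the real LFS objects, then peek at the embeddings checkpoint.
# Assumes git-lfs is installed and the .pt file is a torch-serialized object;
# the key names printed below are whatever the file actually contains.
import subprocess
import torch

subprocess.run(["git", "lfs", "pull"], check=True)

ckpt = torch.load(
    "wt_mapper_70k_sd2_idloss/checkpoints/embeddings_gs-139999.pt",
    map_location="cpu",
)
print(type(ckpt))
if isinstance(ckpt, dict):
    for key, value in ckpt.items():
        print(key, getattr(value, "shape", None))
```
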
wt_mapper_70k_sd2_idloss/configs/wt_mapper_70k_sd2_idloss-lightning.yaml
ADDED
@@ -0,0 +1,16 @@
+lightning:
+  modelcheckpoint:
+    params:
+      every_n_train_steps: 20000
+  callbacks:
+    image_logger:
+      target: main.ImageLogger
+      params:
+        batch_frequency: 10000
+        max_images: 8
+        increase_log_steps: false
+  trainer:
+    benchmark: true
+    max_steps: 150000
+    accelerator: ddp
+    gpus: 0,
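
This `-lightning.yaml` file carries only the PyTorch Lightning side of the run: checkpointing every 20k steps, an ImageLogger callback every 10k steps (up to 8 images, no log-step ramp-up), and a DDP trainer capped at 150k steps on GPU 0. A minimal sketch of reading these settings back with OmegaConf, the config library latent-diffusion-style training scripts use for such files (the path is this repo's; everything else is standard OmegaConf):

```python
# Sketch: load the saved Lightning-side config and pull out the trainer options.
from omegaconf import OmegaConf

cfg = OmegaConf.load(
    "wt_mapper_70k_sd2_idloss/configs/wt_mapper_70k_sd2_idloss-lightning.yaml"
)

trainer_opts = OmegaConf.to_container(cfg.lightning.trainer)
print(trainer_opts)  # {'benchmark': True, 'max_steps': 150000, 'accelerator': 'ddp', 'gpus': '0,'}

print(cfg.lightning.modelcheckpoint.params.every_n_train_steps)     # 20000
print(cfg.lightning.callbacks.image_logger.params.batch_frequency)  # 10000
```
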
wt_mapper_70k_sd2_idloss/configs/wt_mapper_70k_sd2_idloss-project.yaml
ADDED
@@ -0,0 +1,158 @@
+model:
+  base_learning_rate: 5.0e-05
+  target: ldm.models.diffusion.ddpm.LatentDiffusion
+  params:
+    linear_start: 0.00085
+    linear_end: 0.012
+    num_timesteps_cond: 1
+    log_every_t: 200
+    timesteps: 1000
+    first_stage_key: image
+    cond_stage_key: caption
+    image_size: 64
+    channels: 4
+    cond_stage_trainable: true
+    conditioning_key: crossattn
+    monitor: val/loss_simple_ema
+    scale_factor: 0.18215
+    use_ema: false
+    embedding_reg_weight: 0.0
+    unfreeze_model: false
+    model_lr: 0.0
+    use_face_masking: false
+    use_faceid_loss: true
+    id_loss_weight: 2
+    personalization_config:
+      target: ldm.modules.embedding_manager.EmbeddingManagerId
+      params:
+        placeholder_strings:
+        - sks
+        - ks
+        - ata
+        - tre
+        - ry
+        - bop
+        - rn
+        - '&'
+        - '*'
+        - '`'
+        initializer_words:
+        - face
+        - face
+        - face
+        - face
+        - face
+        - face
+        - face
+        - face
+        - face
+        - face
+        max_ids: 20
+        num_embeds_per_token: 2
+        meta_mlp_depth: 4
+        loss_type: none
+        meta_inner_dim: 512
+        context_dim: 1024
+        meta_heads: 1
+        use_rm_mlp: false
+        test_mode: image
+        momentum: 0.99
+        save_fp16: false
+        use_aligned_faces: true
+        use_regularization: true
+        regularization_weight: 1.0e-07
+        use_norm_reg: false
+        use_hspace: false
+        use_stylegan_based_mapper: true
+        use_timestep_embedder: true
+        use_celeb_basis: false
+        use_basis_offset: false
+        shift_basis: false
+        embedding_manager_ckpt: ''
+    unet_config:
+      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+      params:
+        use_fp16: false
+        image_size: 32
+        in_channels: 4
+        out_channels: 4
+        model_channels: 320
+        attention_resolutions:
+        - 4
+        - 2
+        - 1
+        num_res_blocks: 2
+        channel_mult:
+        - 1
+        - 2
+        - 4
+        - 4
+        num_head_channels: 64
+        use_spatial_transformer: true
+        use_linear_in_transformer: true
+        transformer_depth: 1
+        context_dim: 1024
+        use_checkpoint: true
+        legacy: false
+    first_stage_config:
+      target: ldm.models.autoencoder.AutoencoderKL
+      params:
+        embed_dim: 4
+        monitor: val/rec_loss
+        ddconfig:
+          double_z: true
+          z_channels: 4
+          resolution: 512
+          in_channels: 3
+          out_ch: 3
+          ch: 128
+          ch_mult:
+          - 1
+          - 2
+          - 4
+          - 4
+          num_res_blocks: 2
+          attn_resolutions: []
+          dropout: 0.0
+        lossconfig:
+          target: torch.nn.Identity
+    cond_stage_config:
+      target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
+      params:
+        freeze: true
+        layer: penultimate
+        use_celeb: true
+        use_svd: true
+        rm_repeats: true
+        celeb_txt: ./infer_images/wiki_names_v2.txt
+        n_components: 512
+        use_sample_reduce: false
+        n_samples: 513
+        use_flatten: false
+        num_embeds_per_token: 2
+        use_clip_face_basis: false
+        face_basis_path: ./weights/clip_face_basis100k_pca_wo_mean.pkl
+    ckpt_path: ./weights/v2-1_512-ema-pruned.ckpt
+data:
+  target: main.DataModuleFromConfig
+  params:
+    batch_size: 2
+    num_workers: 8
+    wrap: false
+    train:
+      target: ldm.data.face_id.FFhq_dataset
+      params:
+        root_dir: ../../datasets/ffhq/
+        split: train
+        use_aug: false
+        image_size: 512
+        limit_dataset_size: 70000
+        use_data_interpolation: false
+    validation:
+      target: ldm.data.face_id.FFhq_dataset
+      params:
+        root_dir: ../../datasets/ffhq/
+        split: val
+        image_size: 512
+        limit_dataset_size: -1
+        use_aug: false
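
The project config describes the full training setup: a Stable Diffusion 2.1-based LatentDiffusion model initialized from `v2-1_512-ema-pruned.ckpt`, a frozen OpenCLIP text encoder with celeb-basis options, an `EmbeddingManagerId` personalization module using a StyleGAN-style mapper with timestep embedding and a face-ID loss (weight 2), and FFHQ train/validation data at 512x512 with the training split limited to 70,000 images. A minimal sketch of rebuilding the model object from this file, assuming the repository keeps latent-diffusion's `ldm.util.instantiate_from_config` helper and that the referenced base checkpoint is available locally (neither is shown in this diff):

```python
# Sketch: instantiate the LatentDiffusion model described by the project config.
# Assumes this repo follows the latent-diffusion convention of
# ldm.util.instantiate_from_config and that ./weights/v2-1_512-ema-pruned.ckpt exists.
import torch
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

cfg = OmegaConf.load(
    "wt_mapper_70k_sd2_idloss/configs/wt_mapper_70k_sd2_idloss-project.yaml"
)

# instantiate_from_config reads cfg.model.target and cfg.model.params;
# extra keys such as base_learning_rate are ignored by it.
model = instantiate_from_config(cfg.model)
model = model.eval().to("cuda" if torch.cuda.is_available() else "cpu")

# The mapper/embedding weights from checkpoints/embeddings_gs-139999.pt would then
# be restored through the embedding manager's own loading hook; its exact method
# name is not shown in this diff, so it is omitted here.
```
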