End of training
This view is limited to 50 files because it contains too many changes.
- README.md +43 -0
- checkpoint-1000/optimizer.bin +3 -0
- checkpoint-1000/random_states_0.pkl +3 -0
- checkpoint-1000/scheduler.bin +3 -0
- checkpoint-1000/text_encoder/config.json +25 -0
- checkpoint-1000/text_encoder/model.safetensors +3 -0
- checkpoint-1000/unet/config.json +68 -0
- checkpoint-1000/unet/diffusion_pytorch_model.safetensors +3 -0
- checkpoint-1200/optimizer.bin +3 -0
- checkpoint-1200/random_states_0.pkl +3 -0
- checkpoint-1200/scheduler.bin +3 -0
- checkpoint-1200/text_encoder/config.json +25 -0
- checkpoint-1200/text_encoder/model.safetensors +3 -0
- checkpoint-1200/unet/config.json +68 -0
- checkpoint-1200/unet/diffusion_pytorch_model.safetensors +3 -0
- checkpoint-200/optimizer.bin +3 -0
- checkpoint-200/random_states_0.pkl +3 -0
- checkpoint-200/scheduler.bin +3 -0
- checkpoint-200/text_encoder/config.json +25 -0
- checkpoint-200/text_encoder/model.safetensors +3 -0
- checkpoint-200/unet/config.json +68 -0
- checkpoint-200/unet/diffusion_pytorch_model.safetensors +3 -0
- checkpoint-400/optimizer.bin +3 -0
- checkpoint-400/random_states_0.pkl +3 -0
- checkpoint-400/scheduler.bin +3 -0
- checkpoint-400/text_encoder/config.json +25 -0
- checkpoint-400/text_encoder/model.safetensors +3 -0
- checkpoint-400/unet/config.json +68 -0
- checkpoint-400/unet/diffusion_pytorch_model.safetensors +3 -0
- checkpoint-600/optimizer.bin +3 -0
- checkpoint-600/random_states_0.pkl +3 -0
- checkpoint-600/scheduler.bin +3 -0
- checkpoint-600/text_encoder/config.json +25 -0
- checkpoint-600/text_encoder/model.safetensors +3 -0
- checkpoint-600/unet/config.json +68 -0
- checkpoint-600/unet/diffusion_pytorch_model.safetensors +3 -0
- checkpoint-800/optimizer.bin +3 -0
- checkpoint-800/random_states_0.pkl +3 -0
- checkpoint-800/scheduler.bin +3 -0
- checkpoint-800/text_encoder/config.json +25 -0
- checkpoint-800/text_encoder/model.safetensors +3 -0
- checkpoint-800/unet/config.json +68 -0
- checkpoint-800/unet/diffusion_pytorch_model.safetensors +3 -0
- feature_extractor/preprocessor_config.json +27 -0
- logs/dreambooth/1739839038.6245437/events.out.tfevents.1739839038.baker.cs.ubc.ca.1035977.1 +3 -0
- logs/dreambooth/1739839038.6256604/hparams.yml +60 -0
- logs/dreambooth/events.out.tfevents.1739839038.baker.cs.ubc.ca.1035977.0 +3 -0
- model_index.json +38 -0
- safety_checker/config.json +29 -0
- safety_checker/model.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,43 @@
+---
+base_model: CompVis/stable-diffusion-v1-4
+library_name: diffusers
+license: creativeml-openrail-m
+inference: true
+instance_prompt: a photo of sks dog
+tags:
+- text-to-image
+- dreambooth
+- diffusers-training
+- stable-diffusion
+- stable-diffusion-diffusers
+---
+
+<!-- This model card has been generated automatically according to the information the training script had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+
+# DreamBooth - danielajisafe/trainable_text
+
+This is a DreamBooth model derived from CompVis/stable-diffusion-v1-4. The weights were trained on "a photo of sks dog" using [DreamBooth](https://dreambooth.github.io/).
+You can find some example images below.
+
+
+
+DreamBooth for the text encoder was enabled: True.
+
+
+## Intended uses & limitations
+
+#### How to use
+
+```python
+# TODO: add an example code snippet for running this diffusion pipeline
+```
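A minimal usage sketch for the "How to use" section above: it assumes the pipeline is loaded directly from this repository (danielajisafe/trainable_text) with the standard diffusers `StableDiffusionPipeline` API and reuses the instance prompt from the card. It is an editor-added illustration, not a snippet verified by the author.

```python
import torch
from diffusers import StableDiffusionPipeline

# Assumption: the fine-tuned DreamBooth weights live in this Hub repo.
pipe = StableDiffusionPipeline.from_pretrained(
    "danielajisafe/trainable_text",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

# Reuse the instance prompt the model was trained with.
image = pipe(
    "a photo of sks dog",
    num_inference_steps=50,
    guidance_scale=7.5,
).images[0]
image.save("sks_dog.png")
```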
+
+#### Limitations and bias
+
+[TODO: provide examples of latent issues and potential remediations]
+
+## Training details
+
+[TODO: describe the data used to train the model]
checkpoint-1000/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bd83d146afdee02df7d5fbbb30edc8f2285cdf408636d362ce2fd327e1bc9ec
+size 1999458902
checkpoint-1000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81d2d0f615850ef8853808130f687f1f63150b25b7093b434639e7649f61eae1
+size 14344
checkpoint-1000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cc9d529863b9458dbb9b91d10c2e89741ffa38f99dbc44be25770289ed29416
+size 1000
checkpoint-1000/text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 512,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.3",
+  "vocab_size": 49408
+}
checkpoint-1000/text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00262ee068a35f981c167630a5d2bd3261b6a15b7135bc73b867aa16c77f8089
+size 492265168
checkpoint-1000/unet/config.json
ADDED
@@ -0,0 +1,68 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.33.0.dev0",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
checkpoint-1000/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d03c449fff22ed37f656ec74bdb39b64fc9de5449030e802ef7032e9a5183b48
+size 3438167536
checkpoint-1200/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24cd928feeead6671fe18749a1749b9d2b9ed22581d94c9fe0cf7325a8911739
+size 1999458902
checkpoint-1200/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9093846665f47db00fe2002f12c83862d3c1777ec4f5c77f136d8a072bc98e7
+size 14344
checkpoint-1200/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8db97925ad886aa6e1bdd8e9e3a2ac57b25214d9bc0eeda4771260b6703a4278
+size 1000
checkpoint-1200/text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 512,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.3",
+  "vocab_size": 49408
+}
checkpoint-1200/text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8fa54afd90f75bd9eda43b3b8090de9c6419c3f5620a1178bbd01177d55cf065
+size 492265168
checkpoint-1200/unet/config.json
ADDED
@@ -0,0 +1,68 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.33.0.dev0",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
checkpoint-1200/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb44682ca7758bd15118d2da1a9bdcc3f0568e704beea90261c808eb7123e1f3
+size 3438167536
checkpoint-200/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:542dd3127f7b9292281d5581cea27acf598004fd01da3000bc358b3eed38121b
+size 1999458006
checkpoint-200/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c21fe1dd696b6489fab6cc9397b0bc7f5bb46e9d9845d4f21a005eb5beef3770
+size 14344
checkpoint-200/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:caa41152562e90f820cd8d847d8cfef38acaada6709d627eb4cde10531d6a002
+size 1000
checkpoint-200/text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 512,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.3",
+  "vocab_size": 49408
+}
checkpoint-200/text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55a8c8754ba50d38ec5e39e608ccc3e5e932a1b276754da6b9e9828cb8d26953
+size 492265168
checkpoint-200/unet/config.json
ADDED
@@ -0,0 +1,68 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.33.0.dev0",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
checkpoint-200/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fb70f4516d865cafd35e8a686e29a9bc27fec3fe208f6e2c1fc2da10378749a
+size 3438167536
checkpoint-400/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30943ecbef38a872b9e78dcbff01b6ec650ca149f2d45d023595b7a9558d9fa8
+size 1999458902
checkpoint-400/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d60375090486439502cfd1c89e605ae8648b97216f7e95e71004891b005f982
+size 14344
checkpoint-400/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85ff3810d83f0315479bff9e354b8a08aed7471bec6e14d46e165fadeadba340
+size 1000
checkpoint-400/text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 512,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.3",
+  "vocab_size": 49408
+}
checkpoint-400/text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:056507a374af78412b52b84c874ab50cecfd4f3c2f642f2e24ac8cbc1f251579
+size 492265168
checkpoint-400/unet/config.json
ADDED
@@ -0,0 +1,68 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.33.0.dev0",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
checkpoint-400/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d859290cebf901c468ff3140edecc4575ce7906a6a83f515e924578f41c05696
+size 3438167536
checkpoint-600/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29c64295019857be9f371f830b0f0ec6a105d5651edf92162aff43824fc30c5f
+size 1999458902
checkpoint-600/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:36fd9c052460dd9e5382c5e225b1eef2d288ebcd6239440c977d3709768fb35a
+size 14344
checkpoint-600/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05610676ed149d854d87e17503a2b6cd63be5d35325fd82390094dcc3fb21f57
+size 1000
checkpoint-600/text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 512,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.3",
+  "vocab_size": 49408
+}
checkpoint-600/text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf23476b4a2261a2a5747b3bb23e9e37ed69c0968e2129a1df8331243dd75280
+size 492265168
checkpoint-600/unet/config.json
ADDED
@@ -0,0 +1,68 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.33.0.dev0",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
checkpoint-600/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63ea8f1ce645a32e0f44ffb646dd7ee3c20474b4fbad9ab63ea0fbbab0caa0c1
+size 3438167536
checkpoint-800/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41f0fdaf7adbeee950a68963d832aeeb6c807ead44588c6f3c41a2f3fc867080
+size 1999458902
checkpoint-800/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5964c740040a28bd8c8703243878299a8a27fdb0ced2ba801d0247eb1ea4a75
+size 14344
checkpoint-800/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92bcc680006bec7a1cd14e8dc1a1b8507ffe1bb293974bdc46a31a9c1e17427f
+size 1000
checkpoint-800/text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 512,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.3",
+  "vocab_size": 49408
+}
checkpoint-800/text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:853f1af913e95e18f3d6df40b567314fcd0c2fdfb5e59c329ffae15b0eefa0e5
+size 492265168
checkpoint-800/unet/config.json
ADDED
@@ -0,0 +1,68 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.33.0.dev0",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "act_fn": "silu",
+  "addition_embed_type": null,
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": null,
+  "attention_head_dim": 8,
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 768,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 64,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 1,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D"
+  ],
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
checkpoint-800/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:436b32e9d8b68a81ffc144c63c9e4b979356996a4c945893b5a0f73be1319486
+size 3438167536
feature_extractor/preprocessor_config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
logs/dreambooth/1739839038.6245437/events.out.tfevents.1739839038.baker.cs.ubc.ca.1035977.1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:166afccbc0b0fe7e6e467a3a585e492a26ae1bcca7cd400af6c805156e6d61bc
+size 2894
logs/dreambooth/1739839038.6256604/hparams.yml
ADDED
@@ -0,0 +1,60 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+center_crop: false
+checkpointing_steps: 200
+checkpoints_total_limit: null
+class_data_dir: ./dog_class_prior/trainable_text
+class_labels_conditioning: null
+class_prompt: a photo of dog
+dataloader_num_workers: 0
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 1
+gradient_checkpointing: true
+hub_model_id: null
+hub_token: null
+instance_data_dir: dog
+instance_prompt: a photo of sks dog
+learning_rate: 1.0e-06
+local_rank: -1
+logging_dir: logs
+lr_num_cycles: 1
+lr_power: 1.0
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_steps: 1200
+mixed_precision: null
+num_class_images: 200
+num_train_epochs: 6
+num_validation_images: 4
+offset_noise: false
+output_dir: ./model_checkpoints/trainable_text
+pre_compute_text_embeddings: false
+pretrained_model_name_or_path: CompVis/stable-diffusion-v1-4
+prior_generation_precision: null
+prior_loss_weight: 1.0
+push_to_hub: true
+report_to: tensorboard
+resolution: 512
+resume_from_checkpoint: null
+revision: null
+sample_batch_size: 4
+scale_lr: false
+seed: null
+set_grads_to_none: false
+skip_save_text_encoder: false
+snr_gamma: null
+text_encoder_use_attention_mask: false
+tokenizer_max_length: null
+tokenizer_name: null
+train_batch_size: 1
+train_text_encoder: true
+use_8bit_adam: true
+validation_prompt: null
+validation_scheduler: DPMSolverMultistepScheduler
+validation_steps: 100
+variant: null
+with_prior_preservation: true
logs/dreambooth/events.out.tfevents.1739839038.baker.cs.ubc.ca.1035977.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a929aa81910d4c43d747329524ae6c888e8493956c676b4c9dcd3333ba229519
+size 100634
model_index.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "_class_name": "StableDiffusionPipeline",
+  "_diffusers_version": "0.33.0.dev0",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "feature_extractor": [
+    "transformers",
+    "CLIPImageProcessor"
+  ],
+  "image_encoder": [
+    null,
+    null
+  ],
+  "requires_safety_checker": true,
+  "safety_checker": [
+    "stable_diffusion",
+    "StableDiffusionSafetyChecker"
+  ],
+  "scheduler": [
+    "diffusers",
+    "PNDMScheduler"
+  ],
+  "text_encoder": [
+    "transformers",
+    "CLIPTextModel"
+  ],
+  "tokenizer": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
safety_checker/config.json
ADDED
@@ -0,0 +1,29 @@
+{
+  "_name_or_path": "/scratch/dajisafe/Track_Diffusion/Dreambooth/.cache/huggingface/hub/models--CompVis--stable-diffusion-v1-4/snapshots/133a221b8aa7292a167afc5127cb63fb5005638b/safety_checker",
+  "architectures": [
+    "StableDiffusionSafetyChecker"
+  ],
+  "initializer_factor": 1.0,
+  "logit_scale_init_value": 2.6592,
+  "model_type": "clip",
+  "projection_dim": 768,
+  "text_config": {
+    "dropout": 0.0,
+    "hidden_size": 768,
+    "intermediate_size": 3072,
+    "model_type": "clip_text_model",
+    "num_attention_heads": 12
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.3",
+  "vision_config": {
+    "_attn_implementation_autoset": true,
+    "dropout": 0.0,
+    "hidden_size": 1024,
+    "intermediate_size": 4096,
+    "model_type": "clip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 24,
+    "patch_size": 14
+  }
+}
safety_checker/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb351a5ded815c3ff744968ad9c6b218d071b9d313d04f35e813b84b4c0ffde8
+size 1215979664