Commit 783b738 (verified) · dacorvo (HF Staff) · 1 parent: 5166b6a

Synchronizing local compiler cache.
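This commit pushes newly compiled artifacts into the shared compiler-cache layout: one MODULE_<hash> directory per compiled pipeline plus a 0_REGISTRY entry used for lookups. As a rough illustration of how a consumer might fetch just this compiled module from such a cache repository, here is a minimal huggingface_hub sketch; the repo id is a placeholder, since the cache repository's name is not shown in this view, and this is not the tool that produced the commit.

```python
# Minimal sketch: download a single compiled module from a Neuron compiler-cache
# repo on the Hub. "org/neuron-cache-repo" is a placeholder repo id (assumption).
from huggingface_hub import snapshot_download

MODULE = "neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba"

local_dir = snapshot_download(
    repo_id="org/neuron-cache-repo",   # placeholder, not stated in this commit
    allow_patterns=[f"{MODULE}/*"],    # only this compiled pipeline's files
)
print("cached module downloaded under", local_dir)
```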
.gitattributes CHANGED
@@ -10669,3 +10669,7 @@ neuronxcc-2.19.8089.0+8ab9f450/MODULE_db907c4f25d2a5f578f1/text_encoder/model.ne
 neuronxcc-2.19.8089.0+8ab9f450/MODULE_db907c4f25d2a5f578f1/unet/model.neuron filter=lfs diff=lfs merge=lfs -text
 neuronxcc-2.19.8089.0+8ab9f450/MODULE_db907c4f25d2a5f578f1/vae_decoder/model.neuron filter=lfs diff=lfs merge=lfs -text
 neuronxcc-2.19.8089.0+8ab9f450/MODULE_db907c4f25d2a5f578f1/vae_encoder/model.neuron filter=lfs diff=lfs merge=lfs -text
+neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/text_encoder_2/model.neuron filter=lfs diff=lfs merge=lfs -text
+neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/unet/model.neuron filter=lfs diff=lfs merge=lfs -text
+neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/vae_decoder/model.neuron filter=lfs diff=lfs merge=lfs -text
+neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/vae_encoder/model.neuron filter=lfs diff=lfs merge=lfs -text
neuronxcc-2.19.8089.0+8ab9f450/0_REGISTRY/0.3.1.dev3/stable-diffusion/stabilityai/stable-diffusion-xl-refiner-1.0/006ebbe04966f503c7ba.json ADDED
@@ -0,0 +1,135 @@
+{
+  "_entry_class": "MultiModelCacheEntry",
+  "_model_id": "stabilityai/stable-diffusion-xl-refiner-1.0",
+  "_task": null,
+  "text_encoder_2": {
+    "architectures": [
+      "CLIPTextModelWithProjection"
+    ],
+    "attention_dropout": 0.0,
+    "dropout": 0.0,
+    "export_model_type": "clip-text-with-projection",
+    "hidden_act": "gelu",
+    "hidden_size": 1280,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 5120,
+    "layer_norm_eps": 1e-05,
+    "max_position_embeddings": 77,
+    "model_type": "clip_text_model",
+    "neuron": {
+      "auto_cast": "matmul",
+      "auto_cast_type": "bf16",
+      "compiler_type": "neuronx-cc",
+      "compiler_version": "2.19.8089.0+8ab9f450",
+      "dynamic_batch_size": false,
+      "float_dtype": "fp32",
+      "inline_weights_to_neff": false,
+      "int_dtype": "int64",
+      "optlevel": "2",
+      "output_attentions": false,
+      "output_hidden_states": false,
+      "static_batch_size": 1,
+      "static_sequence_length": 77,
+      "task": "feature-extraction",
+      "tensor_parallel_size": 1
+    },
+    "num_attention_heads": 20,
+    "num_hidden_layers": 32,
+    "output_hidden_states": true,
+    "return_dict": false,
+    "vocab_size": 49408
+  },
+  "unet": {
+    "_class_name": "UNet2DConditionModel",
+    "act_fn": "silu",
+    "addition_embed_type": "text_time",
+    "addition_embed_type_num_heads": 64,
+    "addition_time_embed_dim": 256,
+    "attention_head_dim": [
+      6,
+      12,
+      24,
+      24
+    ],
+    "attention_type": "default",
+    "block_out_channels": [
+      384,
+      768,
+      1536,
+      1536
+    ],
+    "center_input_sample": false,
+    "class_embed_type": null,
+    "class_embeddings_concat": false,
+    "conv_in_kernel": 3,
+    "conv_out_kernel": 3,
+    "cross_attention_dim": 1280,
+    "cross_attention_norm": null,
+    "down_block_types": [
+      "DownBlock2D",
+      "CrossAttnDownBlock2D",
+      "CrossAttnDownBlock2D",
+      "DownBlock2D"
+    ],
+    "downsample_padding": 1,
+    "dropout": 0.0,
+    "dual_cross_attention": false,
+    "encoder_hid_dim": null,
+    "encoder_hid_dim_type": null,
+    "flip_sin_to_cos": true,
+    "freq_shift": 0,
+    "in_channels": 4,
+    "layers_per_block": 2,
+    "mid_block_only_cross_attention": null,
+    "mid_block_scale_factor": 1,
+    "mid_block_type": "UNetMidBlock2DCrossAttn",
+    "neuron": {
+      "auto_cast": "matmul",
+      "auto_cast_type": "bf16",
+      "compiler_type": "neuronx-cc",
+      "compiler_version": "2.19.8089.0+8ab9f450",
+      "dynamic_batch_size": false,
+      "float_dtype": "fp32",
+      "inline_weights_to_neff": true,
+      "int_dtype": "int64",
+      "optlevel": "2",
+      "output_attentions": false,
+      "output_hidden_states": false,
+      "static_batch_size": 1,
+      "static_height": 128,
+      "static_num_channels": 4,
+      "static_sequence_length": 77,
+      "static_vae_scale_factor": 8,
+      "static_width": 128,
+      "task": "semantic-segmentation",
+      "tensor_parallel_size": 1
+    },
+    "norm_eps": 1e-05,
+    "norm_num_groups": 32,
+    "num_attention_heads": null,
+    "num_class_embeds": null,
+    "only_cross_attention": false,
+    "out_channels": 4,
+    "output_attentions": false,
+    "projection_class_embeddings_input_dim": 2560,
+    "resnet_out_scale_factor": 1.0,
+    "resnet_skip_time_act": false,
+    "resnet_time_scale_shift": "default",
+    "reverse_transformer_layers_per_block": null,
+    "time_cond_proj_dim": null,
+    "time_embedding_act_fn": null,
+    "time_embedding_dim": null,
+    "time_embedding_type": "positional",
+    "timestep_post_act": null,
+    "transformer_layers_per_block": 4,
+    "up_block_types": [
+      "UpBlock2D",
+      "CrossAttnUpBlock2D",
+      "CrossAttnUpBlock2D",
+      "UpBlock2D"
+    ],
+    "upcast_attention": null,
+    "use_linear_projection": true
+  }
+}
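The `neuron` blocks in this registry entry record the static shapes the modules were compiled for. In the UNet entry, `static_height` and `static_width` of 128 are latent dimensions; multiplied by `static_vae_scale_factor` of 8 they appear to correspond to 1024x1024 images at batch size 1, which is consistent with the VAE encoder/decoder configs further down. A tiny sanity-check sketch:

```python
# Sanity check of the static shapes recorded in the cache entry above:
# latent spatial dims times the VAE scale factor give the pixel resolution
# this UNet was compiled for.
unet_neuron = {"static_height": 128, "static_width": 128, "static_vae_scale_factor": 8}

height = unet_neuron["static_height"] * unet_neuron["static_vae_scale_factor"]
width = unet_neuron["static_width"] * unet_neuron["static_vae_scale_factor"]
assert (height, width) == (1024, 1024)  # compiled for 1024x1024 images, batch size 1
```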
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/model_index.json ADDED
@@ -0,0 +1,43 @@
+{
+  "_class_name": "StableDiffusionXLImg2ImgPipeline",
+  "_diffusers_version": "0.35.1",
+  "_name_or_path": "stabilityai/stable-diffusion-xl-refiner-1.0",
+  "feature_extractor": [
+    null,
+    null
+  ],
+  "force_zeros_for_empty_prompt": false,
+  "image_encoder": [
+    null,
+    null
+  ],
+  "requires_aesthetics_score": true,
+  "scheduler": [
+    "diffusers",
+    "EulerDiscreteScheduler"
+  ],
+  "text_encoder": [
+    null,
+    null
+  ],
+  "text_encoder_2": [
+    "transformers",
+    "CLIPTextModelWithProjection"
+  ],
+  "tokenizer": [
+    null,
+    null
+  ],
+  "tokenizer_2": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
+  "unet": [
+    "diffusers",
+    "UNet2DConditionModel"
+  ],
+  "vae": [
+    "diffusers",
+    "AutoencoderKL"
+  ]
+}
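model_index.json identifies the compiled pipeline as a StableDiffusionXLImg2ImgPipeline (the SDXL refiner), with only the second text encoder and tokenizer populated. Below is a hedged sketch of how such a pre-compiled module directory is typically loaded; the class name and call pattern follow optimum-neuron's diffusion API as understood here and are assumptions, not something taken from this commit.

```python
# Hedged sketch: loading a pre-compiled SDXL refiner directory with
# optimum-neuron. Class name and kwargs are per optimum-neuron's documented
# diffusion API as understood here (assumption).
from PIL import Image
from optimum.neuron import NeuronStableDiffusionXLImg2ImgPipeline

# A local copy of a directory laid out like MODULE_006ebbe04966f503c7ba above
# (model_index.json, scheduler/, text_encoder_2/, unet/, vae_decoder/, vae_encoder/).
pipe = NeuronStableDiffusionXLImg2ImgPipeline.from_pretrained("./MODULE_006ebbe04966f503c7ba")

# Shapes are static: batch size 1 and 1024x1024 inputs, per the cache entry.
init_image = Image.open("base_output.png").resize((1024, 1024))
image = pipe(prompt="a photo of an astronaut riding a horse", image=init_image).images[0]
image.save("refined.png")
```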
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "_class_name": "EulerDiscreteScheduler",
+  "_diffusers_version": "0.35.1",
+  "beta_end": 0.012,
+  "beta_schedule": "scaled_linear",
+  "beta_start": 0.00085,
+  "clip_sample": false,
+  "final_sigmas_type": "zero",
+  "interpolation_type": "linear",
+  "num_train_timesteps": 1000,
+  "prediction_type": "epsilon",
+  "rescale_betas_zero_snr": false,
+  "sample_max_value": 1.0,
+  "set_alpha_to_one": false,
+  "sigma_max": null,
+  "sigma_min": null,
+  "skip_prk_steps": true,
+  "steps_offset": 1,
+  "timestep_spacing": "leading",
+  "timestep_type": "discrete",
+  "trained_betas": null,
+  "use_beta_sigmas": false,
+  "use_exponential_sigmas": false,
+  "use_karras_sigmas": false
+}
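The scheduler is exported as a plain diffusers config (nothing is compiled for it), so it can be re-instantiated directly with diffusers. A minimal sketch, assuming the module directory has been downloaded locally:

```python
# Rebuild the scheduler from the exported config above using diffusers.
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler.from_pretrained(
    "./MODULE_006ebbe04966f503c7ba", subfolder="scheduler"
)
print(scheduler.config.timestep_spacing)  # "leading", as recorded in the config
```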
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/text_encoder_2/config.json ADDED
@@ -0,0 +1,54 @@
+{
+  "architectures": [
+    "CLIPTextModelWithProjection"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "export_model_type": "clip-text-with-projection",
+  "hidden_act": "gelu",
+  "hidden_size": 1280,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 5120,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "neuron": {
+    "auto_cast": "matmul",
+    "auto_cast_type": "bf16",
+    "compiler_type": "neuronx-cc",
+    "compiler_version": "2.19.8089.0+8ab9f450",
+    "dynamic_batch_size": false,
+    "float_dtype": "fp32",
+    "inline_weights_to_neff": false,
+    "input_names": [
+      "input_ids"
+    ],
+    "int_dtype": "int64",
+    "model_type": "clip-text-with-projection",
+    "optlevel": "2",
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_names": [
+      "text_embeds",
+      "last_hidden_state",
+      "hidden_states"
+    ],
+    "static_batch_size": 1,
+    "static_sequence_length": 77,
+    "task": "feature-extraction",
+    "tensor_parallel_size": 1
+  },
+  "num_attention_heads": 20,
+  "num_hidden_layers": 32,
+  "output_hidden_states": true,
+  "pad_token_id": 1,
+  "projection_dim": 1280,
+  "return_dict": false,
+  "torch_dtype": "float32",
+  "torchscript": true,
+  "transformers_version": "4.55.4",
+  "vocab_size": 49408
+}
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/text_encoder_2/model.neuron ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86a712b91bfd3af01f0b8b264731e37c9f62868f9fb0a61b906f679796c6cd03
+size 2801803637
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "!",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,39 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "!",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49406": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49407": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|startoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
+  "model_max_length": 77,
+  "pad_token": "!",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": "<|endoftext|>"
+}
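tokenizer_2 is a stock CLIPTokenizer with model_max_length 77, matching the compiled text encoder's static_sequence_length of 77, so prompts must be padded to that fixed length. A minimal sketch, assuming the module directory is available locally:

```python
# Produce input_ids shaped for the compiled text encoder: fixed length 77,
# batch size 1, as recorded in the neuron config above.
from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("./MODULE_006ebbe04966f503c7ba", subfolder="tokenizer_2")
enc = tok(
    "a photo of an astronaut riding a horse",
    padding="max_length",
    max_length=77,
    truncation=True,
    return_tensors="pt",
)
print(enc["input_ids"].shape)  # torch.Size([1, 77])
```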
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/unet/config.json ADDED
@@ -0,0 +1,111 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.35.1",
+  "_use_default_values": [
+    "attention_type",
+    "dropout",
+    "reverse_transformer_layers_per_block"
+  ],
+  "act_fn": "silu",
+  "addition_embed_type": "text_time",
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": 256,
+  "attention_head_dim": [
+    6,
+    12,
+    24,
+    24
+  ],
+  "attention_type": "default",
+  "block_out_channels": [
+    384,
+    768,
+    1536,
+    1536
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 1280,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "DownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "neuron": {
+    "auto_cast": "matmul",
+    "auto_cast_type": "bf16",
+    "compiler_type": "neuronx-cc",
+    "compiler_version": "2.19.8089.0+8ab9f450",
+    "dynamic_batch_size": false,
+    "float_dtype": "fp32",
+    "inline_weights_to_neff": true,
+    "input_names": [
+      "sample",
+      "timestep",
+      "encoder_hidden_states",
+      "text_embeds",
+      "time_ids"
+    ],
+    "int_dtype": "int64",
+    "model_type": "unet",
+    "optlevel": "2",
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_names": [
+      "sample"
+    ],
+    "static_batch_size": 1,
+    "static_height": 128,
+    "static_num_channels": 4,
+    "static_sequence_length": 77,
+    "static_vae_scale_factor": 8,
+    "static_width": 128,
+    "task": "semantic-segmentation",
+    "tensor_parallel_size": 1
+  },
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "output_attentions": false,
+  "projection_class_embeddings_input_dim": 2560,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 128,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": 4,
+  "transformers_version": null,
+  "up_block_types": [
+    "UpBlock2D",
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "UpBlock2D"
+  ],
+  "upcast_attention": null,
+  "use_linear_projection": true
+}
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/unet/model.neuron ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28065f3a89861e3eae0830b960a86cea5c33b2a1f31967c3f294255040393fcd
+size 3686370617
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/vae_decoder/config.json ADDED
@@ -0,0 +1,73 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.35.1",
+  "_use_default_values": [
+    "use_quant_conv",
+    "mid_block_add_attention",
+    "shift_factor",
+    "latents_mean",
+    "use_post_quant_conv",
+    "latents_std"
+  ],
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "force_upcast": true,
+  "in_channels": 3,
+  "latent_channels": 4,
+  "latents_mean": null,
+  "latents_std": null,
+  "layers_per_block": 2,
+  "mid_block_add_attention": true,
+  "neuron": {
+    "auto_cast": "matmul",
+    "auto_cast_type": "bf16",
+    "compiler_type": "neuronx-cc",
+    "compiler_version": "2.19.8089.0+8ab9f450",
+    "dynamic_batch_size": false,
+    "float_dtype": "fp32",
+    "inline_weights_to_neff": true,
+    "input_names": [
+      "latent_sample"
+    ],
+    "int_dtype": "int64",
+    "model_type": "vae-decoder",
+    "optlevel": "2",
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_names": [
+      "sample"
+    ],
+    "static_batch_size": 1,
+    "static_height": 128,
+    "static_num_channels": 4,
+    "static_width": 128,
+    "task": "semantic-segmentation",
+    "tensor_parallel_size": 1
+  },
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "output_attentions": false,
+  "sample_size": 1024,
+  "scaling_factor": 0.13025,
+  "shift_factor": null,
+  "transformers_version": null,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ],
+  "use_post_quant_conv": true,
+  "use_quant_conv": true
+}
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/vae_decoder/model.neuron ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd06927024eb773d727111280344efa6bef7647c251e27684d7e32aac3a82428
+size 370909427
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/vae_encoder/config.json ADDED
@@ -0,0 +1,73 @@
+{
+  "_class_name": "AutoencoderKL",
+  "_diffusers_version": "0.35.1",
+  "_use_default_values": [
+    "use_quant_conv",
+    "mid_block_add_attention",
+    "shift_factor",
+    "latents_mean",
+    "use_post_quant_conv",
+    "latents_std"
+  ],
+  "act_fn": "silu",
+  "block_out_channels": [
+    128,
+    256,
+    512,
+    512
+  ],
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
+  ],
+  "force_upcast": true,
+  "in_channels": 3,
+  "latent_channels": 4,
+  "latents_mean": null,
+  "latents_std": null,
+  "layers_per_block": 2,
+  "mid_block_add_attention": true,
+  "neuron": {
+    "auto_cast": "matmul",
+    "auto_cast_type": "bf16",
+    "compiler_type": "neuronx-cc",
+    "compiler_version": "2.19.8089.0+8ab9f450",
+    "dynamic_batch_size": false,
+    "float_dtype": "fp32",
+    "inline_weights_to_neff": true,
+    "input_names": [
+      "sample"
+    ],
+    "int_dtype": "int64",
+    "model_type": "vae-encoder",
+    "optlevel": "2",
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_names": [
+      "latent_parameters"
+    ],
+    "static_batch_size": 1,
+    "static_height": 1024,
+    "static_num_channels": 3,
+    "static_width": 1024,
+    "task": "semantic-segmentation",
+    "tensor_parallel_size": 1
+  },
+  "norm_num_groups": 32,
+  "out_channels": 3,
+  "output_attentions": false,
+  "sample_size": 1024,
+  "scaling_factor": 0.13025,
+  "shift_factor": null,
+  "transformers_version": null,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ],
+  "use_post_quant_conv": true,
+  "use_quant_conv": true
+}
neuronxcc-2.19.8089.0+8ab9f450/MODULE_006ebbe04966f503c7ba/vae_encoder/model.neuron ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6ec863faa78d055e49edecdff2ef00d32f173a4872df90a9ee4e81b7f8619af
+size 222984881