Add files using upload-large-folder tool
- README.md +4 -18
- chat_template.jinja +7 -0
- config.json +82 -39
- generation_config.json +2 -2
- model-00001-of-00005.safetensors +2 -2
- model-00002-of-00005.safetensors +2 -2
- model-00003-of-00005.safetensors +2 -2
- model-00004-of-00005.safetensors +2 -2
- model-00005-of-00005.safetensors +2 -2
- model.safetensors.index.json +448 -483
- tokenizer_config.json +4 -4
README.md
CHANGED
@@ -1,29 +1,15 @@
   ---
-  base_model:
+  base_model:
+  - Qwen/Qwen2.5-VL-32B-Instruct
   license: apache-2.0
   language:
   - en
   pipeline_tag: image-text-to-text
   tags:
   - multimodal
+  - unsloth
   library_name: transformers
   ---
-  <div>
-  <p style="margin-bottom: 0; margin-top: 0;">
-  <strong>See <a href="https://huggingface.co/collections/unsloth/qwen25-vl-all-versions-679ca6c784fad5bd976a05a1">our collection</a> for versions of Qwen2.5-VL including 4-bit & dynamic formats.</strong>
-  </p>
-  <div style="display: flex; gap: 5px; align-items: center; ">
-  <a href="https://github.com/unslothai/unsloth/">
-  <img src="https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png" width="133">
-  </a>
-  <a href="https://discord.gg/unsloth">
-  <img src="https://github.com/unslothai/unsloth/raw/main/images/Discord%20button.png" width="173">
-  </a>
-  <a href="https://docs.unsloth.ai/">
-  <img src="https://raw.githubusercontent.com/unslothai/unsloth/refs/heads/main/images/documentation%20green%20button.png" width="143">
-  </a>
-  </div>
-  </div>

   # Qwen2.5-VL-32B-Instruct
   <a href="https://chat.qwenlm.ai/" target="_blank" style="margin: 2px;">
@@ -66,7 +52,7 @@ We extend dynamic resolution to the temporal dimension by adopting dynamic FPS s
   We enhance both training and inference speeds by strategically implementing window attention into the ViT. The ViT architecture is further optimized with SwiGLU and RMSNorm, aligning it with the structure of the Qwen2.5 LLM.

-  We have
+  We have four models with 3, 7, 32 and 72 billion parameters. This repo contains the instruction-tuned 32B Qwen2.5-VL model. For more information, visit our [Blog](https://qwenlm.github.io/blog/qwen2.5-vl/) and [GitHub](https://github.com/QwenLM/Qwen2.5-VL).
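For context, this commit turns the card into a pre-quantized bnb-4bit build of the base model above. A minimal loading sketch (the repo id is an assumption for illustration; the 4-bit settings are read from the shipped config.json shown further down):

```python
# Minimal loading sketch. The repo id below is an assumption; the
# bitsandbytes 4-bit settings come from the checkpoint's own config.json.
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

model_id = "unsloth/Qwen2.5-VL-32B-Instruct-unsloth-bnb-4bit"  # assumed repo id
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(model_id, device_map="auto")
processor = AutoProcessor.from_pretrained(model_id)
```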
chat_template.jinja
ADDED
@@ -0,0 +1,7 @@
+  {% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
+  You are a helpful assistant.<|im_end|>
+  {% endif %}<|im_start|>{{ message['role'] }}
+  {% if message['content'] is string %}{{ message['content'] }}<|im_end|>
+  {% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
+  {% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
+  {% endif %}
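The template wraps each turn in `<|im_start|>…<|im_end|>`, injects a default system prompt when the first message isn't one, and replaces every image or video content part with `<|vision_start|><|image_pad|><|vision_end|>` (or the video equivalent), optionally numbering them when `add_vision_id` is set. A minimal sketch of rendering it through transformers (the repo id is an assumption; the image path is a placeholder):

```python
# Sketch: render the shipped chat template without tokenizing, so the
# <|im_start|>/<|vision_start|> markers it inserts are visible.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    "unsloth/Qwen2.5-VL-32B-Instruct-unsloth-bnb-4bit"  # assumed repo id
)

messages = [
    {"role": "user", "content": [
        {"type": "image", "image": "file:///path/to/demo.jpg"},  # placeholder path
        {"type": "text", "text": "Describe this image."},
    ]},
]

text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)  # ends with "<|im_start|>assistant\n" because add_generation_prompt=True
```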
config.json
CHANGED
@@ -9,7 +9,7 @@
   "image_token_id": 151655,
   "initializer_range": 0.02,
   "intermediate_size": 27648,
-  "max_position_embeddings":
+  "max_position_embeddings": 128000,
   "max_window_layers": 64,
   "model_type": "qwen2_5_vl",
   "num_attention_heads": 40,
@@ -31,75 +31,78 @@
   "merger",
   "modality_projection",
   "model.layers.10.mlp",
-  "model.layers.
-  "model.layers.22.mlp",
+  "model.layers.16.mlp",
   "model.layers.15.mlp",
-  "
+  "model.layers.5.mlp",
+  "model.layers.5.self_attn",
+  "model.layers.8.mlp",
   "visual.blocks.27.attn",
+  "visual.blocks.31.mlp",
   "visual.blocks.28.attn",
+  "visual.blocks.21.attn",
   "visual.blocks.31.attn",
+  "visual.blocks.26.attn",
+  "visual.blocks.29.attn",
   "visual.merger.mlp",
   "visual.blocks.30.mlp",
-  "visual.blocks.29.attn",
-  "visual.blocks.26.attn",
   "visual.blocks.24.attn",
   "visual.blocks.30.attn",
-  "visual.blocks.21.attn",
-  "visual.blocks.25.attn",
   "visual.blocks.22.attn",
-  "visual.blocks.
-  "visual.blocks.19.attn",
+  "visual.blocks.25.attn",
   "visual.blocks.20.attn",
+  "visual.blocks.29.mlp",
   "model.layers.6.mlp",
-  "visual.blocks.18.attn",
   "visual.blocks.26.mlp",
+  "visual.blocks.25.mlp",
   "visual.blocks.27.mlp",
-  "visual.blocks.
-  "visual.blocks.18.mlp",
+  "visual.blocks.18.attn",
   "visual.blocks.19.mlp",
-  "visual.blocks.
+  "visual.blocks.19.attn",
   "visual.blocks.23.mlp",
-  "visual.blocks.
+  "visual.blocks.17.attn",
   "visual.blocks.23.attn",
+  "visual.blocks.20.mlp",
   "visual.blocks.22.mlp",
-  "visual.blocks.
-  "visual.blocks.12.attn",
-  "visual.blocks.11.attn",
-  "visual.blocks.9.attn",
-  "visual.blocks.21.mlp",
-  "visual.blocks.16.attn",
+  "visual.blocks.9.mlp",
   "visual.blocks.14.attn",
+  "visual.blocks.18.mlp",
+  "visual.blocks.28.mlp",
+  "visual.blocks.24.mlp",
+  "visual.blocks.11.attn",
   "visual.blocks.10.mlp",
-  "visual.blocks.
+  "visual.blocks.6.mlp",
+  "visual.blocks.16.attn",
+  "visual.blocks.21.mlp",
   "visual.blocks.13.attn",
-  "visual.blocks.
-  "visual.blocks.11.mlp",
-  "visual.blocks.9.mlp",
-  "visual.blocks.6.attn",
-  "visual.blocks.5.mlp",
-  "visual.blocks.12.mlp",
+  "visual.blocks.12.attn",
   "visual.blocks.4.mlp",
-  "visual.blocks.
+  "visual.blocks.10.attn",
+  "visual.blocks.11.mlp",
+  "visual.blocks.9.attn",
   "visual.blocks.8.mlp",
-  "visual.blocks.
+  "visual.blocks.16.mlp",
   "visual.blocks.2.attn",
-  "visual.blocks.
-  "visual.blocks.
+  "visual.blocks.5.mlp",
+  "visual.blocks.2.mlp",
   "visual.blocks.8.attn",
+  "visual.blocks.6.attn",
   "visual.blocks.13.mlp",
-  "visual.blocks.
+  "visual.blocks.7.mlp",
+  "visual.blocks.1.mlp",
   "visual.blocks.15.mlp",
-  "visual.blocks.2.mlp",
   "visual.blocks.1.attn",
-  "visual.blocks.
-  "visual.blocks.0.attn",
+  "visual.blocks.3.mlp",
   "visual.blocks.15.attn",
-  "visual.blocks.4.attn",
   "visual.blocks.7.attn",
+  "visual.blocks.12.mlp",
+  "visual.blocks.5.attn",
+  "visual.blocks.4.attn",
   "visual.blocks.3.attn",
+  "visual.blocks.0.attn",
   "visual.blocks.14.mlp",
+  "visual.blocks.17.mlp",
   "visual.blocks.0.mlp",
-  "visual.blocks.
+  "visual.blocks.31.mlp.down_proj"
   ],
   "llm_int8_threshold": 6.0,
   "load_in_4bit": true,
@@ -118,9 +121,48 @@
   },
   "rope_theta": 1000000.0,
   "sliding_window": 32768,
+  "text_config": {
+    "architectures": [
+      "Qwen2_5_VLForConditionalGeneration"
+    ],
+    "attention_dropout": 0.0,
+    "eos_token_id": 151645,
+    "hidden_act": "silu",
+    "hidden_size": 5120,
+    "image_token_id": null,
+    "initializer_range": 0.02,
+    "intermediate_size": 27648,
+    "max_position_embeddings": 128000,
+    "max_window_layers": 64,
+    "model_type": "qwen2_5_vl_text",
+    "num_attention_heads": 40,
+    "num_hidden_layers": 64,
+    "num_key_value_heads": 8,
+    "pad_token_id": 151643,
+    "rms_norm_eps": 1e-06,
+    "rope_scaling": {
+      "mrope_section": [
+        16,
+        24,
+        24
+      ],
+      "rope_type": "default",
+      "type": "default"
+    },
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "torch_dtype": "bfloat16",
+    "use_cache": true,
+    "use_sliding_window": false,
+    "video_token_id": null,
+    "vision_end_token_id": 151653,
+    "vision_start_token_id": 151652,
+    "vision_token_id": 151654,
+    "vocab_size": 152064
+  },
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.
+  "transformers_version": "4.52.0.dev0",
   "unsloth_fixed": true,
   "use_cache": true,
   "use_sliding_window": false,
@@ -137,6 +179,7 @@
   "hidden_size": 1280,
   "in_channels": 3,
   "in_chans": 3,
+  "initializer_range": 0.02,
   "intermediate_size": 3456,
   "model_type": "qwen2_5_vl",
   "num_heads": 16,
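Two things in this diff carry the substance of the change. First, the `llm_int8_skip_modules` list is what makes this a "dynamic" 4-bit build: the vision merger and the most quantization-sensitive attention/MLP blocks are kept out of 4-bit. Second, the new nested `text_config` spells out the language tower; note that `mrope_section` [16, 24, 24] sums to 64, the per-head rotary half-dimension (hidden_size 5120 / 40 heads = 128, halved), which is how M-RoPE splits the rotary dims across temporal, height, and width positions. A hedged sketch of hand-building the equivalent quantization config (in practice `from_pretrained` reads the shipped one and none of this is needed; the repo id and the shortened skip list are placeholders):

```python
# Illustrative sketch only: the checkpoint already embeds this
# quantization_config, so plain from_pretrained() picks it up.
import torch
from transformers import BitsAndBytesConfig, Qwen2_5_VLForConditionalGeneration

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # matches the *.quant_state.bitsandbytes__nf4 tensors below
    bnb_4bit_use_double_quant=True,         # nested_absmax / nested_quant_map keys imply double quantization
    bnb_4bit_compute_dtype=torch.bfloat16,  # torch_dtype in the config
    llm_int8_skip_modules=["visual.merger.mlp", "model.layers.5.mlp"],  # truncated stand-in for the full list
)

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "unsloth/Qwen2.5-VL-32B-Instruct-unsloth-bnb-4bit",  # assumed repo id
    quantization_config=bnb_config,
    device_map="auto",
)
```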
generation_config.json
CHANGED
@@ -5,9 +5,9 @@
     151645,
     151643
   ],
-  "max_length":
+  "max_length": 128000,
   "pad_token_id": 151654,
   "repetition_penalty": 1.05,
   "temperature": 1e-06,
-  "transformers_version": "4.
+  "transformers_version": "4.52.0.dev0"
   }
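These defaults (near-zero temperature, a mild repetition penalty, 128k `max_length`) are picked up automatically by `generate()`; per-call keywords override them. A small sketch, assuming `model` and `processor` from the loading example above and an `inputs` dict produced by the processor:

```python
# Sketch: the shipped generation_config supplies temperature=1e-06,
# repetition_penalty=1.05 and max_length=128000 by default.
# `inputs` is assumed to come from processor(...) on a rendered prompt.
generated_ids = model.generate(
    **inputs,
    max_new_tokens=512,       # overrides the 128k max_length budget for this call
)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```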
model-00001-of-00005.safetensors
CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:
-  size
+  oid sha256:75e8b15c927cc3ae1737f61bc7b238d5e008a73b5e8052ee8f75189b710a70c1
+  size 4884840863
model-00002-of-00005.safetensors
CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:
-  size
+  oid sha256:614eb314745898b39031deafa6486b022a5272920cf7a2ffddd4e3d35293268d
+  size 4753752247
model-00003-of-00005.safetensors
CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:
-  size
+  oid sha256:9822c75f53a00867e05736b063a57010714a63d2302c3bf66ba407bbdbd29cae
+  size 4989181706
model-00004-of-00005.safetensors
CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:
-  size
+  oid sha256:36c7b46219878dff2dcf373aa56347da55958df1d9f9d5e0ae94c4b25e87eab0
+  size 4939613635
model-00005-of-00005.safetensors
CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:
-  size
+  oid sha256:98cdc0eb48e4279e454248b2aa1937209041cf6a91007d8dbab5494742270310
+  size 4900743706
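These are Git LFS pointer files: the `oid` is the SHA-256 of the actual shard blob, so a local download can be checked against it. A small verification sketch (file name from the diff above; a local working directory is assumed):

```python
# Verify a downloaded shard against its LFS pointer (oid = SHA-256 of the blob).
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "75e8b15c927cc3ae1737f61bc7b238d5e008a73b5e8052ee8f75189b710a70c1"
assert sha256_of("model-00001-of-00005.safetensors") == expected
```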
model.safetensors.index.json
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
{
|
2 |
"metadata": {
|
3 |
-
"total_size":
|
4 |
},
|
5 |
"weight_map": {
|
6 |
"lm_head.weight": "model-00005-of-00005.safetensors",
|
@@ -319,11 +319,11 @@
|
|
319 |
"model.layers.14.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
|
320 |
"model.layers.14.self_attn.v_proj.weight.quant_map": "model-00002-of-00005.safetensors",
|
321 |
"model.layers.14.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
|
322 |
-
"model.layers.15.input_layernorm.weight": "model-
|
323 |
-
"model.layers.15.mlp.down_proj.weight": "model-
|
324 |
"model.layers.15.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
325 |
-
"model.layers.15.mlp.up_proj.weight": "model-
|
326 |
-
"model.layers.15.post_attention_layernorm.weight": "model-
|
327 |
"model.layers.15.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
|
328 |
"model.layers.15.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
329 |
"model.layers.15.self_attn.k_proj.weight.absmax": "model-00002-of-00005.safetensors",
|
@@ -351,53 +351,38 @@
|
|
351 |
"model.layers.15.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
|
352 |
"model.layers.15.self_attn.v_proj.weight.quant_map": "model-00002-of-00005.safetensors",
|
353 |
"model.layers.15.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
|
354 |
-
"model.layers.16.input_layernorm.weight": "model-
|
355 |
-
"model.layers.16.mlp.down_proj.weight": "model-
|
356 |
-
"model.layers.16.mlp.
|
357 |
-
"model.layers.16.mlp.
|
358 |
-
"model.layers.16.
|
359 |
-
"model.layers.16.
|
360 |
-
"model.layers.16.
|
361 |
-
"model.layers.16.
|
362 |
-
"model.layers.16.
|
363 |
-
"model.layers.16.
|
364 |
-
"model.layers.16.
|
365 |
-
"model.layers.16.
|
366 |
-
"model.layers.16.
|
367 |
-
"model.layers.16.
|
368 |
-
"model.layers.16.
|
369 |
-
"model.layers.16.
|
370 |
-
"model.layers.16.
|
371 |
-
"model.layers.16.
|
372 |
-
"model.layers.16.
|
373 |
-
"model.layers.16.
|
374 |
-
"model.layers.16.self_attn.
|
375 |
-
"model.layers.16.self_attn.
|
376 |
-
"model.layers.16.self_attn.
|
377 |
-
"model.layers.16.self_attn.
|
378 |
-
"model.layers.16.self_attn.
|
379 |
-
"model.layers.16.self_attn.
|
380 |
-
"model.layers.16.self_attn.
|
381 |
-
"model.layers.16.self_attn.
|
382 |
-
"model.layers.16.self_attn.
|
383 |
-
"model.layers.16.self_attn.
|
384 |
-
"model.layers.16.self_attn.
|
385 |
-
"model.layers.16.self_attn.
|
386 |
-
"model.layers.16.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
|
387 |
-
"model.layers.16.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
|
388 |
-
"model.layers.16.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
|
389 |
-
"model.layers.16.self_attn.q_proj.weight.absmax": "model-00002-of-00005.safetensors",
|
390 |
-
"model.layers.16.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
|
391 |
-
"model.layers.16.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
|
392 |
-
"model.layers.16.self_attn.q_proj.weight.quant_map": "model-00002-of-00005.safetensors",
|
393 |
-
"model.layers.16.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
|
394 |
-
"model.layers.16.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
|
395 |
-
"model.layers.16.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
|
396 |
-
"model.layers.16.self_attn.v_proj.weight.absmax": "model-00002-of-00005.safetensors",
|
397 |
-
"model.layers.16.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
|
398 |
-
"model.layers.16.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
|
399 |
-
"model.layers.16.self_attn.v_proj.weight.quant_map": "model-00002-of-00005.safetensors",
|
400 |
-
"model.layers.16.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
|
401 |
"model.layers.17.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
402 |
"model.layers.17.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
403 |
"model.layers.17.mlp.down_proj.weight.absmax": "model-00003-of-00005.safetensors",
|
@@ -418,33 +403,33 @@
|
|
418 |
"model.layers.17.mlp.up_proj.weight.quant_map": "model-00003-of-00005.safetensors",
|
419 |
"model.layers.17.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
|
420 |
"model.layers.17.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
421 |
-
"model.layers.17.self_attn.k_proj.bias": "model-
|
422 |
-
"model.layers.17.self_attn.k_proj.weight": "model-
|
423 |
-
"model.layers.17.self_attn.k_proj.weight.absmax": "model-
|
424 |
-
"model.layers.17.self_attn.k_proj.weight.nested_absmax": "model-
|
425 |
-
"model.layers.17.self_attn.k_proj.weight.nested_quant_map": "model-
|
426 |
-
"model.layers.17.self_attn.k_proj.weight.quant_map": "model-
|
427 |
-
"model.layers.17.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
428 |
"model.layers.17.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
|
429 |
"model.layers.17.self_attn.o_proj.weight.absmax": "model-00003-of-00005.safetensors",
|
430 |
"model.layers.17.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
|
431 |
"model.layers.17.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
|
432 |
"model.layers.17.self_attn.o_proj.weight.quant_map": "model-00003-of-00005.safetensors",
|
433 |
"model.layers.17.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
|
434 |
-
"model.layers.17.self_attn.q_proj.bias": "model-
|
435 |
-
"model.layers.17.self_attn.q_proj.weight": "model-
|
436 |
-
"model.layers.17.self_attn.q_proj.weight.absmax": "model-
|
437 |
-
"model.layers.17.self_attn.q_proj.weight.nested_absmax": "model-
|
438 |
-
"model.layers.17.self_attn.q_proj.weight.nested_quant_map": "model-
|
439 |
-
"model.layers.17.self_attn.q_proj.weight.quant_map": "model-
|
440 |
-
"model.layers.17.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
441 |
-
"model.layers.17.self_attn.v_proj.bias": "model-
|
442 |
-
"model.layers.17.self_attn.v_proj.weight": "model-
|
443 |
-
"model.layers.17.self_attn.v_proj.weight.absmax": "model-
|
444 |
-
"model.layers.17.self_attn.v_proj.weight.nested_absmax": "model-
|
445 |
-
"model.layers.17.self_attn.v_proj.weight.nested_quant_map": "model-
|
446 |
-
"model.layers.17.self_attn.v_proj.weight.quant_map": "model-
|
447 |
-
"model.layers.17.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
448 |
"model.layers.18.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
449 |
"model.layers.18.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
450 |
"model.layers.18.mlp.down_proj.weight.absmax": "model-00003-of-00005.safetensors",
|
@@ -682,8 +667,23 @@
|
|
682 |
"model.layers.21.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
|
683 |
"model.layers.22.input_layernorm.weight": "model-00003-of-00005.safetensors",
|
684 |
"model.layers.22.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
|
|
|
|
|
|
|
|
|
|
|
685 |
"model.layers.22.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
|
|
|
|
|
|
|
|
|
|
|
686 |
"model.layers.22.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
|
|
|
|
|
|
|
|
|
|
|
687 |
"model.layers.22.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
|
688 |
"model.layers.22.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
689 |
"model.layers.22.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
@@ -1135,26 +1135,26 @@
|
|
1135 |
"model.layers.30.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
|
1136 |
"model.layers.30.self_attn.v_proj.weight.quant_map": "model-00003-of-00005.safetensors",
|
1137 |
"model.layers.30.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
|
1138 |
-
"model.layers.31.input_layernorm.weight": "model-
|
1139 |
-
"model.layers.31.mlp.down_proj.weight": "model-
|
1140 |
-
"model.layers.31.mlp.down_proj.weight.absmax": "model-
|
1141 |
-
"model.layers.31.mlp.down_proj.weight.nested_absmax": "model-
|
1142 |
-
"model.layers.31.mlp.down_proj.weight.nested_quant_map": "model-
|
1143 |
-
"model.layers.31.mlp.down_proj.weight.quant_map": "model-
|
1144 |
-
"model.layers.31.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1145 |
-
"model.layers.31.mlp.gate_proj.weight": "model-
|
1146 |
-
"model.layers.31.mlp.gate_proj.weight.absmax": "model-
|
1147 |
-
"model.layers.31.mlp.gate_proj.weight.nested_absmax": "model-
|
1148 |
-
"model.layers.31.mlp.gate_proj.weight.nested_quant_map": "model-
|
1149 |
-
"model.layers.31.mlp.gate_proj.weight.quant_map": "model-
|
1150 |
-
"model.layers.31.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1151 |
-
"model.layers.31.mlp.up_proj.weight": "model-
|
1152 |
-
"model.layers.31.mlp.up_proj.weight.absmax": "model-
|
1153 |
-
"model.layers.31.mlp.up_proj.weight.nested_absmax": "model-
|
1154 |
-
"model.layers.31.mlp.up_proj.weight.nested_quant_map": "model-
|
1155 |
-
"model.layers.31.mlp.up_proj.weight.quant_map": "model-
|
1156 |
-
"model.layers.31.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1157 |
-
"model.layers.31.post_attention_layernorm.weight": "model-
|
1158 |
"model.layers.31.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
|
1159 |
"model.layers.31.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
|
1160 |
"model.layers.31.self_attn.k_proj.weight.absmax": "model-00003-of-00005.safetensors",
|
@@ -1162,12 +1162,12 @@
|
|
1162 |
"model.layers.31.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
|
1163 |
"model.layers.31.self_attn.k_proj.weight.quant_map": "model-00003-of-00005.safetensors",
|
1164 |
"model.layers.31.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
|
1165 |
-
"model.layers.31.self_attn.o_proj.weight": "model-
|
1166 |
-
"model.layers.31.self_attn.o_proj.weight.absmax": "model-
|
1167 |
-
"model.layers.31.self_attn.o_proj.weight.nested_absmax": "model-
|
1168 |
-
"model.layers.31.self_attn.o_proj.weight.nested_quant_map": "model-
|
1169 |
-
"model.layers.31.self_attn.o_proj.weight.quant_map": "model-
|
1170 |
-
"model.layers.31.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1171 |
"model.layers.31.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
|
1172 |
"model.layers.31.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
|
1173 |
"model.layers.31.self_attn.q_proj.weight.absmax": "model-00003-of-00005.safetensors",
|
@@ -1182,100 +1182,100 @@
|
|
1182 |
"model.layers.31.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
|
1183 |
"model.layers.31.self_attn.v_proj.weight.quant_map": "model-00003-of-00005.safetensors",
|
1184 |
"model.layers.31.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
|
1185 |
-
"model.layers.32.input_layernorm.weight": "model-
|
1186 |
-
"model.layers.32.mlp.down_proj.weight": "model-
|
1187 |
-
"model.layers.32.mlp.down_proj.weight.absmax": "model-
|
1188 |
-
"model.layers.32.mlp.down_proj.weight.nested_absmax": "model-
|
1189 |
-
"model.layers.32.mlp.down_proj.weight.nested_quant_map": "model-
|
1190 |
-
"model.layers.32.mlp.down_proj.weight.quant_map": "model-
|
1191 |
-
"model.layers.32.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1192 |
-
"model.layers.32.mlp.gate_proj.weight": "model-
|
1193 |
-
"model.layers.32.mlp.gate_proj.weight.absmax": "model-
|
1194 |
-
"model.layers.32.mlp.gate_proj.weight.nested_absmax": "model-
|
1195 |
-
"model.layers.32.mlp.gate_proj.weight.nested_quant_map": "model-
|
1196 |
-
"model.layers.32.mlp.gate_proj.weight.quant_map": "model-
|
1197 |
-
"model.layers.32.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1198 |
-
"model.layers.32.mlp.up_proj.weight": "model-
|
1199 |
-
"model.layers.32.mlp.up_proj.weight.absmax": "model-
|
1200 |
-
"model.layers.32.mlp.up_proj.weight.nested_absmax": "model-
|
1201 |
-
"model.layers.32.mlp.up_proj.weight.nested_quant_map": "model-
|
1202 |
-
"model.layers.32.mlp.up_proj.weight.quant_map": "model-
|
1203 |
-
"model.layers.32.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1204 |
-
"model.layers.32.post_attention_layernorm.weight": "model-
|
1205 |
-
"model.layers.32.self_attn.k_proj.bias": "model-
|
1206 |
-
"model.layers.32.self_attn.k_proj.weight": "model-
|
1207 |
-
"model.layers.32.self_attn.k_proj.weight.absmax": "model-
|
1208 |
-
"model.layers.32.self_attn.k_proj.weight.nested_absmax": "model-
|
1209 |
-
"model.layers.32.self_attn.k_proj.weight.nested_quant_map": "model-
|
1210 |
-
"model.layers.32.self_attn.k_proj.weight.quant_map": "model-
|
1211 |
-
"model.layers.32.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1212 |
-
"model.layers.32.self_attn.o_proj.weight": "model-
|
1213 |
-
"model.layers.32.self_attn.o_proj.weight.absmax": "model-
|
1214 |
-
"model.layers.32.self_attn.o_proj.weight.nested_absmax": "model-
|
1215 |
-
"model.layers.32.self_attn.o_proj.weight.nested_quant_map": "model-
|
1216 |
-
"model.layers.32.self_attn.o_proj.weight.quant_map": "model-
|
1217 |
-
"model.layers.32.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1218 |
-
"model.layers.32.self_attn.q_proj.bias": "model-
|
1219 |
-
"model.layers.32.self_attn.q_proj.weight": "model-
|
1220 |
-
"model.layers.32.self_attn.q_proj.weight.absmax": "model-
|
1221 |
-
"model.layers.32.self_attn.q_proj.weight.nested_absmax": "model-
|
1222 |
-
"model.layers.32.self_attn.q_proj.weight.nested_quant_map": "model-
|
1223 |
-
"model.layers.32.self_attn.q_proj.weight.quant_map": "model-
|
1224 |
-
"model.layers.32.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1225 |
-
"model.layers.32.self_attn.v_proj.bias": "model-
|
1226 |
-
"model.layers.32.self_attn.v_proj.weight": "model-
|
1227 |
-
"model.layers.32.self_attn.v_proj.weight.absmax": "model-
|
1228 |
-
"model.layers.32.self_attn.v_proj.weight.nested_absmax": "model-
|
1229 |
-
"model.layers.32.self_attn.v_proj.weight.nested_quant_map": "model-
|
1230 |
-
"model.layers.32.self_attn.v_proj.weight.quant_map": "model-
|
1231 |
-
"model.layers.32.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1232 |
-
"model.layers.33.input_layernorm.weight": "model-
|
1233 |
-
"model.layers.33.mlp.down_proj.weight": "model-
|
1234 |
-
"model.layers.33.mlp.down_proj.weight.absmax": "model-
|
1235 |
-
"model.layers.33.mlp.down_proj.weight.nested_absmax": "model-
|
1236 |
-
"model.layers.33.mlp.down_proj.weight.nested_quant_map": "model-
|
1237 |
-
"model.layers.33.mlp.down_proj.weight.quant_map": "model-
|
1238 |
-
"model.layers.33.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1239 |
-
"model.layers.33.mlp.gate_proj.weight": "model-
|
1240 |
-
"model.layers.33.mlp.gate_proj.weight.absmax": "model-
|
1241 |
-
"model.layers.33.mlp.gate_proj.weight.nested_absmax": "model-
|
1242 |
-
"model.layers.33.mlp.gate_proj.weight.nested_quant_map": "model-
|
1243 |
-
"model.layers.33.mlp.gate_proj.weight.quant_map": "model-
|
1244 |
-
"model.layers.33.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1245 |
-
"model.layers.33.mlp.up_proj.weight": "model-
|
1246 |
-
"model.layers.33.mlp.up_proj.weight.absmax": "model-
|
1247 |
-
"model.layers.33.mlp.up_proj.weight.nested_absmax": "model-
|
1248 |
-
"model.layers.33.mlp.up_proj.weight.nested_quant_map": "model-
|
1249 |
-
"model.layers.33.mlp.up_proj.weight.quant_map": "model-
|
1250 |
-
"model.layers.33.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1251 |
-
"model.layers.33.post_attention_layernorm.weight": "model-
|
1252 |
-
"model.layers.33.self_attn.k_proj.bias": "model-
|
1253 |
-
"model.layers.33.self_attn.k_proj.weight": "model-
|
1254 |
-
"model.layers.33.self_attn.k_proj.weight.absmax": "model-
|
1255 |
-
"model.layers.33.self_attn.k_proj.weight.nested_absmax": "model-
|
1256 |
-
"model.layers.33.self_attn.k_proj.weight.nested_quant_map": "model-
|
1257 |
-
"model.layers.33.self_attn.k_proj.weight.quant_map": "model-
|
1258 |
-
"model.layers.33.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1259 |
-
"model.layers.33.self_attn.o_proj.weight": "model-
|
1260 |
-
"model.layers.33.self_attn.o_proj.weight.absmax": "model-
|
1261 |
-
"model.layers.33.self_attn.o_proj.weight.nested_absmax": "model-
|
1262 |
-
"model.layers.33.self_attn.o_proj.weight.nested_quant_map": "model-
|
1263 |
-
"model.layers.33.self_attn.o_proj.weight.quant_map": "model-
|
1264 |
-
"model.layers.33.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1265 |
-
"model.layers.33.self_attn.q_proj.bias": "model-
|
1266 |
-
"model.layers.33.self_attn.q_proj.weight": "model-
|
1267 |
-
"model.layers.33.self_attn.q_proj.weight.absmax": "model-
|
1268 |
-
"model.layers.33.self_attn.q_proj.weight.nested_absmax": "model-
|
1269 |
-
"model.layers.33.self_attn.q_proj.weight.nested_quant_map": "model-
|
1270 |
-
"model.layers.33.self_attn.q_proj.weight.quant_map": "model-
|
1271 |
-
"model.layers.33.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1272 |
-
"model.layers.33.self_attn.v_proj.bias": "model-
|
1273 |
-
"model.layers.33.self_attn.v_proj.weight": "model-
|
1274 |
-
"model.layers.33.self_attn.v_proj.weight.absmax": "model-
|
1275 |
-
"model.layers.33.self_attn.v_proj.weight.nested_absmax": "model-
|
1276 |
-
"model.layers.33.self_attn.v_proj.weight.nested_quant_map": "model-
|
1277 |
-
"model.layers.33.self_attn.v_proj.weight.quant_map": "model-
|
1278 |
-
"model.layers.33.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1279 |
"model.layers.34.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
1280 |
"model.layers.34.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
1281 |
"model.layers.34.mlp.down_proj.weight.absmax": "model-00004-of-00005.safetensors",
|
@@ -1283,12 +1283,12 @@
|
|
1283 |
"model.layers.34.mlp.down_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
|
1284 |
"model.layers.34.mlp.down_proj.weight.quant_map": "model-00004-of-00005.safetensors",
|
1285 |
"model.layers.34.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
|
1286 |
-
"model.layers.34.mlp.gate_proj.weight": "model-
|
1287 |
-
"model.layers.34.mlp.gate_proj.weight.absmax": "model-
|
1288 |
-
"model.layers.34.mlp.gate_proj.weight.nested_absmax": "model-
|
1289 |
-
"model.layers.34.mlp.gate_proj.weight.nested_quant_map": "model-
|
1290 |
-
"model.layers.34.mlp.gate_proj.weight.quant_map": "model-
|
1291 |
-
"model.layers.34.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1292 |
"model.layers.34.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
|
1293 |
"model.layers.34.mlp.up_proj.weight.absmax": "model-00004-of-00005.safetensors",
|
1294 |
"model.layers.34.mlp.up_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
|
@@ -1296,33 +1296,33 @@
|
|
1296 |
"model.layers.34.mlp.up_proj.weight.quant_map": "model-00004-of-00005.safetensors",
|
1297 |
"model.layers.34.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
|
1298 |
"model.layers.34.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
|
1299 |
-
"model.layers.34.self_attn.k_proj.bias": "model-
|
1300 |
-
"model.layers.34.self_attn.k_proj.weight": "model-
|
1301 |
-
"model.layers.34.self_attn.k_proj.weight.absmax": "model-
|
1302 |
-
"model.layers.34.self_attn.k_proj.weight.nested_absmax": "model-
|
1303 |
-
"model.layers.34.self_attn.k_proj.weight.nested_quant_map": "model-
|
1304 |
-
"model.layers.34.self_attn.k_proj.weight.quant_map": "model-
|
1305 |
-
"model.layers.34.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1306 |
-
"model.layers.34.self_attn.o_proj.weight": "model-
|
1307 |
-
"model.layers.34.self_attn.o_proj.weight.absmax": "model-
|
1308 |
-
"model.layers.34.self_attn.o_proj.weight.nested_absmax": "model-
|
1309 |
-
"model.layers.34.self_attn.o_proj.weight.nested_quant_map": "model-
|
1310 |
-
"model.layers.34.self_attn.o_proj.weight.quant_map": "model-
|
1311 |
-
"model.layers.34.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1312 |
-
"model.layers.34.self_attn.q_proj.bias": "model-
|
1313 |
-
"model.layers.34.self_attn.q_proj.weight": "model-
|
1314 |
-
"model.layers.34.self_attn.q_proj.weight.absmax": "model-
|
1315 |
-
"model.layers.34.self_attn.q_proj.weight.nested_absmax": "model-
|
1316 |
-
"model.layers.34.self_attn.q_proj.weight.nested_quant_map": "model-
|
1317 |
-
"model.layers.34.self_attn.q_proj.weight.quant_map": "model-
|
1318 |
-
"model.layers.34.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1319 |
-
"model.layers.34.self_attn.v_proj.bias": "model-
|
1320 |
-
"model.layers.34.self_attn.v_proj.weight": "model-
|
1321 |
-
"model.layers.34.self_attn.v_proj.weight.absmax": "model-
|
1322 |
-
"model.layers.34.self_attn.v_proj.weight.nested_absmax": "model-
|
1323 |
-
"model.layers.34.self_attn.v_proj.weight.nested_quant_map": "model-
|
1324 |
-
"model.layers.34.self_attn.v_proj.weight.quant_map": "model-
|
1325 |
-
"model.layers.34.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
1326 |
"model.layers.35.input_layernorm.weight": "model-00004-of-00005.safetensors",
|
1327 |
"model.layers.35.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
|
1328 |
"model.layers.35.mlp.down_proj.weight.absmax": "model-00004-of-00005.safetensors",
|
@@ -2075,60 +2075,25 @@
|
|
2075 |
"model.layers.49.self_attn.v_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
|
2076 |
"model.layers.49.self_attn.v_proj.weight.quant_map": "model-00004-of-00005.safetensors",
|
2077 |
"model.layers.49.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
|
2078 |
-
"model.layers.5.input_layernorm.weight": "model-
|
2079 |
-
"model.layers.5.mlp.down_proj.weight": "model-
|
2080 |
-
"model.layers.5.mlp.down_proj.weight.absmax": "model-00001-of-00005.safetensors",
|
2081 |
-
"model.layers.5.mlp.down_proj.weight.nested_absmax": "model-00001-of-00005.safetensors",
|
2082 |
-
"model.layers.5.mlp.down_proj.weight.nested_quant_map": "model-00001-of-00005.safetensors",
|
2083 |
-
"model.layers.5.mlp.down_proj.weight.quant_map": "model-00001-of-00005.safetensors",
|
2084 |
-
"model.layers.5.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00005.safetensors",
|
2085 |
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
|
2086 |
-
"model.layers.5.mlp.gate_proj.weight.absmax": "model-00001-of-00005.safetensors",
|
2087 |
-
"model.layers.5.mlp.gate_proj.weight.nested_absmax": "model-00001-of-00005.safetensors",
|
2088 |
-
"model.layers.5.mlp.gate_proj.weight.nested_quant_map": "model-00001-of-00005.safetensors",
|
2089 |
-
"model.layers.5.mlp.gate_proj.weight.quant_map": "model-00001-of-00005.safetensors",
|
2090 |
-
"model.layers.5.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00005.safetensors",
|
2091 |
"model.layers.5.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
|
2092 |
-
"model.layers.5.
|
2093 |
-
"model.layers.5.mlp.up_proj.weight.nested_absmax": "model-00001-of-00005.safetensors",
|
2094 |
-
"model.layers.5.mlp.up_proj.weight.nested_quant_map": "model-00001-of-00005.safetensors",
|
2095 |
-
"model.layers.5.mlp.up_proj.weight.quant_map": "model-00001-of-00005.safetensors",
|
2096 |
-
"model.layers.5.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00005.safetensors",
|
2097 |
-
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
|
2098 |
"model.layers.5.self_attn.k_proj.bias": "model-00001-of-00005.safetensors",
|
2099 |
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
|
2100 |
-
"model.layers.5.self_attn.k_proj.weight.absmax": "model-00001-of-00005.safetensors",
|
2101 |
-
"model.layers.5.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00005.safetensors",
|
2102 |
-
"model.layers.5.self_attn.k_proj.weight.nested_quant_map": "model-00001-of-00005.safetensors",
|
2103 |
-
"model.layers.5.self_attn.k_proj.weight.quant_map": "model-00001-of-00005.safetensors",
|
2104 |
-
"model.layers.5.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00005.safetensors",
|
2105 |
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
|
2106 |
-
"model.layers.5.self_attn.o_proj.weight.absmax": "model-00001-of-00005.safetensors",
|
2107 |
-
"model.layers.5.self_attn.o_proj.weight.nested_absmax": "model-00001-of-00005.safetensors",
|
2108 |
-
"model.layers.5.self_attn.o_proj.weight.nested_quant_map": "model-00001-of-00005.safetensors",
|
2109 |
-
"model.layers.5.self_attn.o_proj.weight.quant_map": "model-00001-of-00005.safetensors",
|
2110 |
-
"model.layers.5.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00005.safetensors",
|
2111 |
"model.layers.5.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
|
2112 |
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
|
2113 |
-
"model.layers.5.self_attn.q_proj.weight.absmax": "model-00001-of-00005.safetensors",
|
2114 |
-
"model.layers.5.self_attn.q_proj.weight.nested_absmax": "model-00001-of-00005.safetensors",
|
2115 |
-
"model.layers.5.self_attn.q_proj.weight.nested_quant_map": "model-00001-of-00005.safetensors",
|
2116 |
-
"model.layers.5.self_attn.q_proj.weight.quant_map": "model-00001-of-00005.safetensors",
|
2117 |
-
"model.layers.5.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00005.safetensors",
|
2118 |
"model.layers.5.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
|
2119 |
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
|
2120 |
-
"model.layers.
|
2121 |
-
"model.layers.
|
2122 |
-
"model.layers.
|
2123 |
-
"model.layers.
|
2124 |
-
"model.layers.
|
2125 |
-
"model.layers.50.
|
2126 |
-
"model.layers.50.mlp.down_proj.weight": "model-
|
2127 |
-
"model.layers.50.mlp.down_proj.weight.absmax": "model-00004-of-00005.safetensors",
|
2128 |
-
"model.layers.50.mlp.down_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
|
2129 |
-
"model.layers.50.mlp.down_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
|
2130 |
-
"model.layers.50.mlp.down_proj.weight.quant_map": "model-00004-of-00005.safetensors",
|
2131 |
-
"model.layers.50.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
|
2132 |
"model.layers.50.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
|
2133 |
"model.layers.50.mlp.gate_proj.weight.absmax": "model-00004-of-00005.safetensors",
|
2134 |
"model.layers.50.mlp.gate_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
|
@@ -2141,7 +2106,7 @@
|
|
2141 |
"model.layers.50.mlp.up_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
|
2142 |
"model.layers.50.mlp.up_proj.weight.quant_map": "model-00004-of-00005.safetensors",
|
2143 |
"model.layers.50.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
|
2144 |
-
"model.layers.50.post_attention_layernorm.weight": "model-
|
2145 |
"model.layers.50.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
|
2146 |
"model.layers.50.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
|
2147 |
"model.layers.50.self_attn.k_proj.weight.absmax": "model-00004-of-00005.safetensors",
|
@@ -2169,147 +2134,147 @@
|
|
2169 |
"model.layers.50.self_attn.v_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
|
2170 |
"model.layers.50.self_attn.v_proj.weight.quant_map": "model-00004-of-00005.safetensors",
|
2171 |
"model.layers.50.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
|
2172 |
-
"model.layers.51.input_layernorm.weight": "model-
|
2173 |
-
"model.layers.51.mlp.down_proj.weight": "model-
|
2174 |
-
"model.layers.51.mlp.down_proj.weight.absmax": "model-
|
2175 |
-
"model.layers.51.mlp.down_proj.weight.nested_absmax": "model-
|
2176 |
-
"model.layers.51.mlp.down_proj.weight.nested_quant_map": "model-
|
2177 |
-
"model.layers.51.mlp.down_proj.weight.quant_map": "model-
|
2178 |
-
"model.layers.51.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2179 |
-
"model.layers.51.mlp.gate_proj.weight": "model-
|
2180 |
-
"model.layers.51.mlp.gate_proj.weight.absmax": "model-
|
2181 |
-
"model.layers.51.mlp.gate_proj.weight.nested_absmax": "model-
|
2182 |
-
"model.layers.51.mlp.gate_proj.weight.nested_quant_map": "model-
|
2183 |
-
"model.layers.51.mlp.gate_proj.weight.quant_map": "model-
|
2184 |
-
"model.layers.51.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2185 |
-
"model.layers.51.mlp.up_proj.weight": "model-
|
2186 |
-
"model.layers.51.mlp.up_proj.weight.absmax": "model-
|
2187 |
-
"model.layers.51.mlp.up_proj.weight.nested_absmax": "model-
|
2188 |
-
"model.layers.51.mlp.up_proj.weight.nested_quant_map": "model-
|
2189 |
-
"model.layers.51.mlp.up_proj.weight.quant_map": "model-
|
2190 |
-
"model.layers.51.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2191 |
-
"model.layers.51.post_attention_layernorm.weight": "model-
|
2192 |
-
"model.layers.51.self_attn.k_proj.bias": "model-
|
2193 |
-
"model.layers.51.self_attn.k_proj.weight": "model-
|
2194 |
-
"model.layers.51.self_attn.k_proj.weight.absmax": "model-
|
2195 |
-
"model.layers.51.self_attn.k_proj.weight.nested_absmax": "model-
|
2196 |
-
"model.layers.51.self_attn.k_proj.weight.nested_quant_map": "model-
|
2197 |
-
"model.layers.51.self_attn.k_proj.weight.quant_map": "model-
|
2198 |
-
"model.layers.51.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2199 |
-
"model.layers.51.self_attn.o_proj.weight": "model-
|
2200 |
-
"model.layers.51.self_attn.o_proj.weight.absmax": "model-
|
2201 |
-
"model.layers.51.self_attn.o_proj.weight.nested_absmax": "model-
|
2202 |
-
"model.layers.51.self_attn.o_proj.weight.nested_quant_map": "model-
|
2203 |
-
"model.layers.51.self_attn.o_proj.weight.quant_map": "model-
|
2204 |
-
"model.layers.51.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2205 |
-
"model.layers.51.self_attn.q_proj.bias": "model-
|
2206 |
-
"model.layers.51.self_attn.q_proj.weight": "model-
|
2207 |
-
"model.layers.51.self_attn.q_proj.weight.absmax": "model-
|
2208 |
-
"model.layers.51.self_attn.q_proj.weight.nested_absmax": "model-
|
2209 |
-
"model.layers.51.self_attn.q_proj.weight.nested_quant_map": "model-
|
2210 |
-
"model.layers.51.self_attn.q_proj.weight.quant_map": "model-
|
2211 |
-
"model.layers.51.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2212 |
-
"model.layers.51.self_attn.v_proj.bias": "model-
|
2213 |
-
"model.layers.51.self_attn.v_proj.weight": "model-
|
2214 |
-
"model.layers.51.self_attn.v_proj.weight.absmax": "model-
|
2215 |
-
"model.layers.51.self_attn.v_proj.weight.nested_absmax": "model-
|
2216 |
-
"model.layers.51.self_attn.v_proj.weight.nested_quant_map": "model-
|
2217 |
-
"model.layers.51.self_attn.v_proj.weight.quant_map": "model-
|
2218 |
-
"model.layers.51.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2219 |
-
"model.layers.52.input_layernorm.weight": "model-
|
2220 |
-
"model.layers.52.mlp.down_proj.weight": "model-
|
2221 |
-
"model.layers.52.mlp.down_proj.weight.absmax": "model-
|
2222 |
-
"model.layers.52.mlp.down_proj.weight.nested_absmax": "model-
|
2223 |
-
"model.layers.52.mlp.down_proj.weight.nested_quant_map": "model-
|
2224 |
-
"model.layers.52.mlp.down_proj.weight.quant_map": "model-
|
2225 |
-
"model.layers.52.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2226 |
-
"model.layers.52.mlp.gate_proj.weight": "model-
|
2227 |
-
"model.layers.52.mlp.gate_proj.weight.absmax": "model-
|
2228 |
-
"model.layers.52.mlp.gate_proj.weight.nested_absmax": "model-
|
2229 |
-
"model.layers.52.mlp.gate_proj.weight.nested_quant_map": "model-
|
2230 |
-
"model.layers.52.mlp.gate_proj.weight.quant_map": "model-
|
2231 |
-
"model.layers.52.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2232 |
-
"model.layers.52.mlp.up_proj.weight": "model-
|
2233 |
-
"model.layers.52.mlp.up_proj.weight.absmax": "model-
|
2234 |
-
"model.layers.52.mlp.up_proj.weight.nested_absmax": "model-
|
2235 |
-
"model.layers.52.mlp.up_proj.weight.nested_quant_map": "model-
|
2236 |
-
"model.layers.52.mlp.up_proj.weight.quant_map": "model-
|
2237 |
-
"model.layers.52.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2238 |
-
"model.layers.52.post_attention_layernorm.weight": "model-
|
2239 |
-
"model.layers.52.self_attn.k_proj.bias": "model-
|
2240 |
-
"model.layers.52.self_attn.k_proj.weight": "model-
|
2241 |
-
"model.layers.52.self_attn.k_proj.weight.absmax": "model-
|
2242 |
-
"model.layers.52.self_attn.k_proj.weight.nested_absmax": "model-
|
2243 |
-
"model.layers.52.self_attn.k_proj.weight.nested_quant_map": "model-
|
2244 |
-
"model.layers.52.self_attn.k_proj.weight.quant_map": "model-
|
2245 |
-
"model.layers.52.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2246 |
-
"model.layers.52.self_attn.o_proj.weight": "model-
|
2247 |
-
"model.layers.52.self_attn.o_proj.weight.absmax": "model-
|
2248 |
-
"model.layers.52.self_attn.o_proj.weight.nested_absmax": "model-
|
2249 |
-
"model.layers.52.self_attn.o_proj.weight.nested_quant_map": "model-
|
2250 |
-
"model.layers.52.self_attn.o_proj.weight.quant_map": "model-
|
2251 |
-
"model.layers.52.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2252 |
-
"model.layers.52.self_attn.q_proj.bias": "model-
|
2253 |
-
"model.layers.52.self_attn.q_proj.weight": "model-
|
2254 |
-
"model.layers.52.self_attn.q_proj.weight.absmax": "model-
|
2255 |
-
"model.layers.52.self_attn.q_proj.weight.nested_absmax": "model-
|
2256 |
-
"model.layers.52.self_attn.q_proj.weight.nested_quant_map": "model-
|
2257 |
-
"model.layers.52.self_attn.q_proj.weight.quant_map": "model-
|
2258 |
-
"model.layers.52.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2259 |
-
"model.layers.52.self_attn.v_proj.bias": "model-
|
2260 |
-
"model.layers.52.self_attn.v_proj.weight": "model-
|
2261 |
-
"model.layers.52.self_attn.v_proj.weight.absmax": "model-
|
2262 |
-
"model.layers.52.self_attn.v_proj.weight.nested_absmax": "model-
|
2263 |
-
"model.layers.52.self_attn.v_proj.weight.nested_quant_map": "model-
|
2264 |
-
"model.layers.52.self_attn.v_proj.weight.quant_map": "model-
|
2265 |
-
"model.layers.52.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2266 |
-
"model.layers.53.input_layernorm.weight": "model-
|
2267 |
-
"model.layers.53.mlp.down_proj.weight": "model-
|
2268 |
-
"model.layers.53.mlp.down_proj.weight.absmax": "model-
|
2269 |
-
"model.layers.53.mlp.down_proj.weight.nested_absmax": "model-
|
2270 |
-
"model.layers.53.mlp.down_proj.weight.nested_quant_map": "model-
|
2271 |
-
"model.layers.53.mlp.down_proj.weight.quant_map": "model-
|
2272 |
-
"model.layers.53.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2273 |
-
"model.layers.53.mlp.gate_proj.weight": "model-
|
2274 |
-
"model.layers.53.mlp.gate_proj.weight.absmax": "model-
|
2275 |
-
"model.layers.53.mlp.gate_proj.weight.nested_absmax": "model-
|
2276 |
-
"model.layers.53.mlp.gate_proj.weight.nested_quant_map": "model-
|
2277 |
-
"model.layers.53.mlp.gate_proj.weight.quant_map": "model-
|
2278 |
-
"model.layers.53.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2279 |
-
"model.layers.53.mlp.up_proj.weight": "model-
|
2280 |
-
"model.layers.53.mlp.up_proj.weight.absmax": "model-
|
2281 |
-
"model.layers.53.mlp.up_proj.weight.nested_absmax": "model-
|
2282 |
-
"model.layers.53.mlp.up_proj.weight.nested_quant_map": "model-
|
2283 |
-
"model.layers.53.mlp.up_proj.weight.quant_map": "model-
|
2284 |
-
"model.layers.53.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2285 |
-
"model.layers.53.post_attention_layernorm.weight": "model-
|
2286 |
-
"model.layers.53.self_attn.k_proj.bias": "model-
|
2287 |
-
"model.layers.53.self_attn.k_proj.weight": "model-
|
2288 |
-
"model.layers.53.self_attn.k_proj.weight.absmax": "model-
|
2289 |
-
"model.layers.53.self_attn.k_proj.weight.nested_absmax": "model-
|
2290 |
-
"model.layers.53.self_attn.k_proj.weight.nested_quant_map": "model-
|
2291 |
-
"model.layers.53.self_attn.k_proj.weight.quant_map": "model-
|
2292 |
-
"model.layers.53.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2293 |
-
"model.layers.53.self_attn.o_proj.weight": "model-
|
2294 |
-
"model.layers.53.self_attn.o_proj.weight.absmax": "model-
|
2295 |
-
"model.layers.53.self_attn.o_proj.weight.nested_absmax": "model-
|
2296 |
-
"model.layers.53.self_attn.o_proj.weight.nested_quant_map": "model-
|
2297 |
-
"model.layers.53.self_attn.o_proj.weight.quant_map": "model-
|
2298 |
-
"model.layers.53.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2299 |
-
"model.layers.53.self_attn.q_proj.bias": "model-
|
2300 |
-
"model.layers.53.self_attn.q_proj.weight": "model-
|
2301 |
-
"model.layers.53.self_attn.q_proj.weight.absmax": "model-
|
2302 |
-
"model.layers.53.self_attn.q_proj.weight.nested_absmax": "model-
|
2303 |
-
"model.layers.53.self_attn.q_proj.weight.nested_quant_map": "model-
|
2304 |
-
"model.layers.53.self_attn.q_proj.weight.quant_map": "model-
|
2305 |
-
"model.layers.53.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2306 |
-
"model.layers.53.self_attn.v_proj.bias": "model-
|
2307 |
-
"model.layers.53.self_attn.v_proj.weight": "model-
|
2308 |
-
"model.layers.53.self_attn.v_proj.weight.absmax": "model-
|
2309 |
-
"model.layers.53.self_attn.v_proj.weight.nested_absmax": "model-
|
2310 |
-
"model.layers.53.self_attn.v_proj.weight.nested_quant_map": "model-
|
2311 |
-
"model.layers.53.self_attn.v_proj.weight.quant_map": "model-
|
2312 |
-
"model.layers.53.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2313 |
"model.layers.54.input_layernorm.weight": "model-00005-of-00005.safetensors",
|
2314 |
"model.layers.54.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
|
2315 |
"model.layers.54.mlp.down_proj.weight.absmax": "model-00005-of-00005.safetensors",
|
@@ -2330,33 +2295,33 @@
|
|
2330 |
"model.layers.54.mlp.up_proj.weight.quant_map": "model-00005-of-00005.safetensors",
|
2331 |
"model.layers.54.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
|
2332 |
"model.layers.54.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
|
2333 |
-
"model.layers.54.self_attn.k_proj.bias": "model-
|
2334 |
-
"model.layers.54.self_attn.k_proj.weight": "model-
|
2335 |
-
"model.layers.54.self_attn.k_proj.weight.absmax": "model-
|
2336 |
-
"model.layers.54.self_attn.k_proj.weight.nested_absmax": "model-
|
2337 |
-
"model.layers.54.self_attn.k_proj.weight.nested_quant_map": "model-
|
2338 |
-
"model.layers.54.self_attn.k_proj.weight.quant_map": "model-
|
2339 |
-
"model.layers.54.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2340 |
-
"model.layers.54.self_attn.o_proj.weight": "model-
|
2341 |
-
"model.layers.54.self_attn.o_proj.weight.absmax": "model-
|
2342 |
-
"model.layers.54.self_attn.o_proj.weight.nested_absmax": "model-
|
2343 |
-
"model.layers.54.self_attn.o_proj.weight.nested_quant_map": "model-
|
2344 |
-
"model.layers.54.self_attn.o_proj.weight.quant_map": "model-
|
2345 |
-
"model.layers.54.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2346 |
-
"model.layers.54.self_attn.q_proj.bias": "model-
|
2347 |
-
"model.layers.54.self_attn.q_proj.weight": "model-
|
2348 |
-
"model.layers.54.self_attn.q_proj.weight.absmax": "model-
|
2349 |
-
"model.layers.54.self_attn.q_proj.weight.nested_absmax": "model-
|
2350 |
-
"model.layers.54.self_attn.q_proj.weight.nested_quant_map": "model-
|
2351 |
-
"model.layers.54.self_attn.q_proj.weight.quant_map": "model-
|
2352 |
-
"model.layers.54.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2353 |
-
"model.layers.54.self_attn.v_proj.bias": "model-
|
2354 |
-
"model.layers.54.self_attn.v_proj.weight": "model-
|
2355 |
-
"model.layers.54.self_attn.v_proj.weight.absmax": "model-
|
2356 |
-
"model.layers.54.self_attn.v_proj.weight.nested_absmax": "model-
|
2357 |
-
"model.layers.54.self_attn.v_proj.weight.nested_quant_map": "model-
|
2358 |
-
"model.layers.54.self_attn.v_proj.weight.quant_map": "model-
|
2359 |
-
"model.layers.54.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
|
2360 |
"model.layers.55.input_layernorm.weight": "model-00005-of-00005.safetensors",
|
2361 |
"model.layers.55.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
|
2362 |
"model.layers.55.mlp.down_proj.weight.absmax": "model-00005-of-00005.safetensors",
|
@@ -2594,36 +2559,36 @@
  "model.layers.59.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
  "model.layers.6.input_layernorm.weight": "model-00002-of-00005.safetensors",
  "model.layers.6.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
- "model.layers.6.mlp.gate_proj.weight": "model-
  "model.layers.6.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
  "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
- "model.layers.6.self_attn.k_proj.bias": "model-
- "model.layers.6.self_attn.k_proj.weight": "model-
- "model.layers.6.self_attn.k_proj.weight.absmax": "model-
- "model.layers.6.self_attn.k_proj.weight.nested_absmax": "model-
- "model.layers.6.self_attn.k_proj.weight.nested_quant_map": "model-
- "model.layers.6.self_attn.k_proj.weight.quant_map": "model-
- "model.layers.6.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-
- "model.layers.6.self_attn.o_proj.weight": "model-
- "model.layers.6.self_attn.o_proj.weight.absmax": "model-
- "model.layers.6.self_attn.o_proj.weight.nested_absmax": "model-
- "model.layers.6.self_attn.o_proj.weight.nested_quant_map": "model-
- "model.layers.6.self_attn.o_proj.weight.quant_map": "model-
- "model.layers.6.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-
- "model.layers.6.self_attn.q_proj.bias": "model-
- "model.layers.6.self_attn.q_proj.weight": "model-
- "model.layers.6.self_attn.q_proj.weight.absmax": "model-
- "model.layers.6.self_attn.q_proj.weight.nested_absmax": "model-
- "model.layers.6.self_attn.q_proj.weight.nested_quant_map": "model-
- "model.layers.6.self_attn.q_proj.weight.quant_map": "model-
- "model.layers.6.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-
- "model.layers.6.self_attn.v_proj.bias": "model-
- "model.layers.6.self_attn.v_proj.weight": "model-
- "model.layers.6.self_attn.v_proj.weight.absmax": "model-
- "model.layers.6.self_attn.v_proj.weight.nested_absmax": "model-
- "model.layers.6.self_attn.v_proj.weight.nested_quant_map": "model-
- "model.layers.6.self_attn.v_proj.weight.quant_map": "model-
- "model.layers.6.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-
  "model.layers.60.input_layernorm.weight": "model-00005-of-00005.safetensors",
  "model.layers.60.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
  "model.layers.60.mlp.down_proj.weight.absmax": "model-00005-of-00005.safetensors",
@@ -2814,8 +2779,23 @@
  "model.layers.63.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
  "model.layers.7.input_layernorm.weight": "model-00002-of-00005.safetensors",
  "model.layers.7.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
  "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
  "model.layers.7.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
  "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
  "model.layers.7.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
  "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
@@ -2846,23 +2826,8 @@
  "model.layers.7.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
  "model.layers.8.input_layernorm.weight": "model-00002-of-00005.safetensors",
  "model.layers.8.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.down_proj.weight.absmax": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.down_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.down_proj.weight.quant_map": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
  "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.gate_proj.weight.absmax": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.gate_proj.weight.quant_map": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
  "model.layers.8.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.up_proj.weight.absmax": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.up_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.up_proj.weight.quant_map": "model-00002-of-00005.safetensors",
- "model.layers.8.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
  "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
  "model.layers.8.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
  "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
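For orientation while reading these hunks: `model.safetensors.index.json` is a plain JSON index whose `weight_map` maps each tensor name to the shard file that stores it, so moving a key from one `model-0000N-of-00005.safetensors` value to another (as this commit does) relocates that tensor between shards without touching the rest. A minimal sketch of how the index is consumed, assuming the five shard files sit next to the index; the tensor name is taken from the diff above, and nothing in this snippet is part of the commit itself:

```python
# Sketch: resolve a tensor name to its shard via the weight_map, then read it.
import json

from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.55.input_layernorm.weight"
shard = index["weight_map"][name]  # e.g. "model-00005-of-00005.safetensors"

# Each shard is an ordinary safetensors file, so the tensor loads directly.
with safe_open(shard, framework="pt", device="cpu") as shard_file:
    tensor = shard_file.get_tensor(name)

print(name, tuple(tensor.shape), "->", shard)
```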
  {
  "metadata": {
+ "total_size": 24467745165
  },
  "weight_map": {
  "lm_head.weight": "model-00005-of-00005.safetensors",
  "model.layers.14.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
  "model.layers.14.self_attn.v_proj.weight.quant_map": "model-00002-of-00005.safetensors",
  "model.layers.14.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
  "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
  "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
  "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
  "model.layers.15.self_attn.k_proj.weight.absmax": "model-00002-of-00005.safetensors",
  "model.layers.15.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
  "model.layers.15.self_attn.v_proj.weight.quant_map": "model-00002-of-00005.safetensors",
  "model.layers.15.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.k_proj.weight.absmax": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.k_proj.weight.quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.o_proj.weight.absmax": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.o_proj.weight.quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.q_proj.weight.absmax": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.q_proj.weight.quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.v_proj.weight.absmax": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.v_proj.weight.quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.16.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
  "model.layers.17.input_layernorm.weight": "model-00003-of-00005.safetensors",
  "model.layers.17.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
  "model.layers.17.mlp.down_proj.weight.absmax": "model-00003-of-00005.safetensors",
  "model.layers.17.mlp.up_proj.weight.quant_map": "model-00003-of-00005.safetensors",
  "model.layers.17.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
  "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.k_proj.weight.absmax": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.k_proj.weight.quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
  "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
  "model.layers.17.self_attn.o_proj.weight.absmax": "model-00003-of-00005.safetensors",
  "model.layers.17.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
  "model.layers.17.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
  "model.layers.17.self_attn.o_proj.weight.quant_map": "model-00003-of-00005.safetensors",
  "model.layers.17.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.q_proj.weight.absmax": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.q_proj.weight.quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.v_proj.weight.absmax": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.v_proj.weight.quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.17.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
  "model.layers.18.input_layernorm.weight": "model-00003-of-00005.safetensors",
  "model.layers.18.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
  "model.layers.18.mlp.down_proj.weight.absmax": "model-00003-of-00005.safetensors",
  "model.layers.21.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
  "model.layers.22.input_layernorm.weight": "model-00003-of-00005.safetensors",
  "model.layers.22.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.down_proj.weight.absmax": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.down_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.down_proj.weight.quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
  "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.gate_proj.weight.absmax": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.gate_proj.weight.quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
  "model.layers.22.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.up_proj.weight.absmax": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.up_proj.weight.nested_absmax": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.up_proj.weight.quant_map": "model-00003-of-00005.safetensors",
+ "model.layers.22.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
  "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
  "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
  "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
  "model.layers.30.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
  "model.layers.30.self_attn.v_proj.weight.quant_map": "model-00003-of-00005.safetensors",
  "model.layers.30.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
+ "model.layers.31.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.down_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.down_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.down_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.down_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.gate_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.gate_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.gate_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.gate_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.up_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.up_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.up_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.up_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.31.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
  "model.layers.31.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
  "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
  "model.layers.31.self_attn.k_proj.weight.absmax": "model-00003-of-00005.safetensors",
  "model.layers.31.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
  "model.layers.31.self_attn.k_proj.weight.quant_map": "model-00003-of-00005.safetensors",
  "model.layers.31.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
+ "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.31.self_attn.o_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.31.self_attn.o_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.31.self_attn.o_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.31.self_attn.o_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.31.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
  "model.layers.31.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
  "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
  "model.layers.31.self_attn.q_proj.weight.absmax": "model-00003-of-00005.safetensors",
  "model.layers.31.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00005.safetensors",
  "model.layers.31.self_attn.v_proj.weight.quant_map": "model-00003-of-00005.safetensors",
  "model.layers.31.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00005.safetensors",
+ "model.layers.32.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.down_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.down_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.down_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.down_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.gate_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.gate_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.gate_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.gate_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.up_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.up_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.up_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.up_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.32.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.k_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.k_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.k_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.k_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.o_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.o_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.o_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.o_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.q_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.q_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.q_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.q_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.v_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.v_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.v_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.v_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.32.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.33.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.down_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.down_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.down_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.down_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.gate_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.gate_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.gate_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.gate_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.up_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.up_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.up_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.up_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.33.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.k_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.k_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.k_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.k_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.o_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.o_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.o_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.o_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.q_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.q_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.q_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.q_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.v_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.v_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.v_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.v_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.33.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
  "model.layers.34.input_layernorm.weight": "model-00004-of-00005.safetensors",
  "model.layers.34.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
  "model.layers.34.mlp.down_proj.weight.absmax": "model-00004-of-00005.safetensors",
  "model.layers.34.mlp.down_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
  "model.layers.34.mlp.down_proj.weight.quant_map": "model-00004-of-00005.safetensors",
  "model.layers.34.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.34.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.34.mlp.gate_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.34.mlp.gate_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.34.mlp.gate_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.34.mlp.gate_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.34.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
  "model.layers.34.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
  "model.layers.34.mlp.up_proj.weight.absmax": "model-00004-of-00005.safetensors",
  "model.layers.34.mlp.up_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
  "model.layers.34.mlp.up_proj.weight.quant_map": "model-00004-of-00005.safetensors",
  "model.layers.34.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
  "model.layers.34.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.k_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.k_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.k_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.k_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.o_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.o_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.o_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.o_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.q_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.q_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.q_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.q_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.v_proj.weight.absmax": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.v_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.v_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.v_proj.weight.quant_map": "model-00004-of-00005.safetensors",
+ "model.layers.34.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
  "model.layers.35.input_layernorm.weight": "model-00004-of-00005.safetensors",
  "model.layers.35.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
  "model.layers.35.mlp.down_proj.weight.absmax": "model-00004-of-00005.safetensors",
  "model.layers.49.self_attn.v_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
  "model.layers.49.self_attn.v_proj.weight.quant_map": "model-00004-of-00005.safetensors",
  "model.layers.49.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
  "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
  "model.layers.5.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
  "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00005.safetensors",
  "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
  "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
  "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
  "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
  "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
  "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "model.layers.50.input_layernorm.weight": "model-00005-of-00005.safetensors",
+ "model.layers.50.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.50.mlp.down_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.50.mlp.down_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.50.mlp.down_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.50.mlp.down_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.50.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
  "model.layers.50.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
  "model.layers.50.mlp.gate_proj.weight.absmax": "model-00004-of-00005.safetensors",
  "model.layers.50.mlp.gate_proj.weight.nested_absmax": "model-00004-of-00005.safetensors",
  "model.layers.50.mlp.up_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
  "model.layers.50.mlp.up_proj.weight.quant_map": "model-00004-of-00005.safetensors",
  "model.layers.50.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.50.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
  "model.layers.50.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
  "model.layers.50.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
  "model.layers.50.self_attn.k_proj.weight.absmax": "model-00004-of-00005.safetensors",
  "model.layers.50.self_attn.v_proj.weight.nested_quant_map": "model-00004-of-00005.safetensors",
  "model.layers.50.self_attn.v_proj.weight.quant_map": "model-00004-of-00005.safetensors",
  "model.layers.50.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00004-of-00005.safetensors",
+ "model.layers.51.input_layernorm.weight": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.down_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.down_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.down_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.down_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.gate_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.gate_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.gate_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.gate_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.up_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.up_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.up_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.up_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.51.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.k_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.k_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.k_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.k_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.k_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.o_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.o_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.o_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.o_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.q_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.q_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.q_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.q_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.q_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.v_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.v_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.v_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.v_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.v_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.51.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.52.input_layernorm.weight": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.down_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.down_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.down_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.down_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.gate_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.gate_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.gate_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.gate_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.up_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.up_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.up_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.up_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.52.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.k_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.k_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.k_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.k_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.k_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.o_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.o_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.o_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.o_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.q_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.q_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.q_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.q_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.q_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.v_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.v_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.v_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.v_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.v_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.52.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.53.input_layernorm.weight": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.down_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.down_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.down_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.down_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.gate_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.gate_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.gate_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.gate_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.up_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.up_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.up_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.up_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.53.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.k_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.k_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.k_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.k_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.k_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.o_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.o_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.o_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.o_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.q_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.q_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.q_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.q_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.q_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.v_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.v_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.v_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.v_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.v_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.53.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
  "model.layers.54.input_layernorm.weight": "model-00005-of-00005.safetensors",
  "model.layers.54.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
  "model.layers.54.mlp.down_proj.weight.absmax": "model-00005-of-00005.safetensors",
  "model.layers.54.mlp.up_proj.weight.quant_map": "model-00005-of-00005.safetensors",
  "model.layers.54.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
  "model.layers.54.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.k_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.k_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.k_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.k_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.k_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.o_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.o_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.o_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.o_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.q_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.q_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.q_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.q_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.q_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.v_proj.bias": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.v_proj.weight.absmax": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.v_proj.weight.nested_absmax": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.v_proj.weight.nested_quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.v_proj.weight.quant_map": "model-00005-of-00005.safetensors",
+ "model.layers.54.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
  "model.layers.55.input_layernorm.weight": "model-00005-of-00005.safetensors",
  "model.layers.55.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
  "model.layers.55.mlp.down_proj.weight.absmax": "model-00005-of-00005.safetensors",
  "model.layers.59.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
  "model.layers.6.input_layernorm.weight": "model-00002-of-00005.safetensors",
  "model.layers.6.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
  "model.layers.6.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
  "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.k_proj.weight.absmax": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.k_proj.weight.quant_map": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.o_proj.weight.absmax": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.o_proj.weight.quant_map": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.q_proj.weight.absmax": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.q_proj.weight.quant_map": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.v_proj.weight.absmax": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.v_proj.weight.quant_map": "model-00002-of-00005.safetensors",
+ "model.layers.6.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
  "model.layers.60.input_layernorm.weight": "model-00005-of-00005.safetensors",
  "model.layers.60.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
  "model.layers.60.mlp.down_proj.weight.absmax": "model-00005-of-00005.safetensors",
2779 |
"model.layers.63.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00005-of-00005.safetensors",
|
2780 |
"model.layers.7.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
2781 |
"model.layers.7.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
2782 |
+
"model.layers.7.mlp.down_proj.weight.absmax": "model-00002-of-00005.safetensors",
|
2783 |
+
"model.layers.7.mlp.down_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
|
2784 |
+
"model.layers.7.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
|
2785 |
+
"model.layers.7.mlp.down_proj.weight.quant_map": "model-00002-of-00005.safetensors",
|
2786 |
+
"model.layers.7.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
|
2787 |
"model.layers.7.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
2788 |
+
"model.layers.7.mlp.gate_proj.weight.absmax": "model-00002-of-00005.safetensors",
|
2789 |
+
"model.layers.7.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
|
2790 |
+
"model.layers.7.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
|
2791 |
+
"model.layers.7.mlp.gate_proj.weight.quant_map": "model-00002-of-00005.safetensors",
|
2792 |
+
"model.layers.7.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
|
2793 |
"model.layers.7.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
2794 |
+
"model.layers.7.mlp.up_proj.weight.absmax": "model-00002-of-00005.safetensors",
|
2795 |
+
"model.layers.7.mlp.up_proj.weight.nested_absmax": "model-00002-of-00005.safetensors",
|
2796 |
+
"model.layers.7.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00005.safetensors",
|
2797 |
+
"model.layers.7.mlp.up_proj.weight.quant_map": "model-00002-of-00005.safetensors",
|
2798 |
+
"model.layers.7.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
|
2799 |
"model.layers.7.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
2800 |
"model.layers.7.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
|
2801 |
"model.layers.7.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
|
|
2826 |
"model.layers.7.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00005.safetensors",
|
2827 |
"model.layers.8.input_layernorm.weight": "model-00002-of-00005.safetensors",
|
2828 |
"model.layers.8.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
|
|
|
|
|
|
|
|
|
|
|
2829 |
"model.layers.8.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
|
|
|
|
|
|
|
|
|
|
|
2830 |
"model.layers.8.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
|
|
|
|
|
|
|
|
|
|
|
2831 |
"model.layers.8.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
|
2832 |
"model.layers.8.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
|
2833 |
"model.layers.8.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
|
tokenizer_config.json
CHANGED
@@ -195,16 +195,16 @@
195      "<|video_pad|>"
196    ],
197    "bos_token": null,
198 -  "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
199    "clean_up_tokenization_spaces": false,
200    "eos_token": "<|im_end|>",
201    "errors": "replace",
202    "extra_special_tokens": {},
203 -  "model_max_length":
204    "pad_token": "<|vision_pad|>",
205    "padding_side": "left",
206    "processor_class": "Qwen2_5_VLProcessor",
207    "split_special_tokens": false,
208    "tokenizer_class": "Qwen2Tokenizer",
209 -  "unk_token": null
210 -  }

195      "<|video_pad|>"
196    ],
197    "bos_token": null,
198    "clean_up_tokenization_spaces": false,
199    "eos_token": "<|im_end|>",
200    "errors": "replace",
201    "extra_special_tokens": {},
202 +  "model_max_length": 128000,
203    "pad_token": "<|vision_pad|>",
204    "padding_side": "left",
205    "processor_class": "Qwen2_5_VLProcessor",
206    "split_special_tokens": false,
207    "tokenizer_class": "Qwen2Tokenizer",
208 +  "unk_token": null,
209 +  "chat_template": "{% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% endif %}<|im_start|>{{ message['role'] }}\n{% if message['content'] is string %}{{ message['content'] }}<|im_end|>\n{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant\n{% endif %}"
210 +  }
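The commit replaces the tool-calling chat template with the vision-aware one above, which expands each image or video entry into `<|vision_start|><|image_pad|><|vision_end|>` / `<|vision_start|><|video_pad|><|vision_end|>` placeholders. A short sketch of rendering a prompt with the new template, assuming the same repo id as before; the image URL and message shape are illustrative only:

```python
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained(
    "unsloth/Qwen2.5-VL-32B-Instruct-unsloth-bnb-4bit"  # assumed repo id
)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "https://example.com/demo.jpg"},  # hypothetical URL
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# tokenize=False returns the raw prompt string produced by the Jinja template,
# which is handy for checking that the vision placeholder tokens are emitted.
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(text)
```

With the template above, this should print the default system turn followed by `<|im_start|>user`, the `<|vision_start|><|image_pad|><|vision_end|>` placeholder, the text content, and a trailing `<|im_start|>assistant` generation prompt.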