ehartford committed on
Commit 9dce586 · verified · 1 Parent(s): 6d71478

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. .ipynb_checkpoints/config-checkpoint.json +70 -0
  2. config.json +70 -0
  3. configuration_deepseek.py +210 -0
  4. inference/.ipynb_checkpoints/convert-checkpoint.py +84 -0
  5. inference/.ipynb_checkpoints/fp8_cast_bf16-checkpoint.py +81 -0
  6. inference/.ipynb_checkpoints/generate-checkpoint.py +137 -0
  7. inference/.ipynb_checkpoints/kernel-checkpoint.py +108 -0
  8. inference/.ipynb_checkpoints/model-checkpoint.py +421 -0
  9. inference/__pycache__/kernel.cpython-310.pyc +0 -0
  10. inference/configs/config_16B.json +19 -0
  11. inference/configs/config_236B.json +20 -0
  12. inference/configs/config_671B.json +22 -0
  13. inference/convert.py +84 -0
  14. inference/fp8_cast_bf16.py +81 -0
  15. inference/generate.py +137 -0
  16. inference/kernel.py +108 -0
  17. inference/model.py +421 -0
  18. inference/requirements.txt +4 -0
  19. model-00001-of-000163.safetensors +3 -0
  20. model-00002-of-000163.safetensors +3 -0
  21. model-00004-of-000163.safetensors +3 -0
  22. model-00005-of-000163.safetensors +3 -0
  23. model-00006-of-000163.safetensors +3 -0
  24. model-00007-of-000163.safetensors +3 -0
  25. model-00008-of-000163.safetensors +3 -0
  26. model-00009-of-000163.safetensors +3 -0
  27. model-00010-of-000163.safetensors +3 -0
  28. model-00012-of-000163.safetensors +3 -0
  29. model-00013-of-000163.safetensors +3 -0
  30. model-00014-of-000163.safetensors +3 -0
  31. model-00015-of-000163.safetensors +3 -0
  32. model-00016-of-000163.safetensors +3 -0
  33. model-00017-of-000163.safetensors +3 -0
  34. model-00018-of-000163.safetensors +3 -0
  35. model-00019-of-000163.safetensors +3 -0
  36. model-00021-of-000163.safetensors +3 -0
  37. model-00022-of-000163.safetensors +3 -0
  38. model-00023-of-000163.safetensors +3 -0
  39. model-00024-of-000163.safetensors +3 -0
  40. model-00025-of-000163.safetensors +3 -0
  41. model-00026-of-000163.safetensors +3 -0
  42. model-00027-of-000163.safetensors +3 -0
  43. model-00028-of-000163.safetensors +3 -0
  44. model-00029-of-000163.safetensors +3 -0
  45. model-00030-of-000163.safetensors +3 -0
  46. model-00031-of-000163.safetensors +3 -0
  47. model-00032-of-000163.safetensors +3 -0
  48. model-00033-of-000163.safetensors +3 -0
  49. model-00034-of-000163.safetensors +3 -0
  50. model-00035-of-000163.safetensors +3 -0
.ipynb_checkpoints/config-checkpoint.json ADDED
@@ -0,0 +1,70 @@
+{
+  "architectures": [
+    "DeepseekV3ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_deepseek.DeepseekV3Config",
+    "AutoModel": "modeling_deepseek.DeepseekV3Model",
+    "AutoModelForCausalLM": "modeling_deepseek.DeepseekV3ForCausalLM"
+  },
+  "aux_loss_alpha": 0.001,
+  "bos_token_id": 0,
+  "eos_token_id": 1,
+  "ep_size": 1,
+  "first_k_dense_replace": 3,
+  "hidden_act": "silu",
+  "hidden_size": 7168,
+  "initializer_range": 0.02,
+  "intermediate_size": 18432,
+  "kv_lora_rank": 512,
+  "max_position_embeddings": 163840,
+  "model_type": "deepseek_v3",
+  "moe_intermediate_size": 2048,
+  "moe_layer_freq": 1,
+  "n_group": 8,
+  "n_routed_experts": 256,
+  "n_shared_experts": 1,
+  "norm_topk_prob": true,
+  "num_attention_heads": 128,
+  "num_experts_per_tok": 8,
+  "num_hidden_layers": 61,
+  "num_key_value_heads": 128,
+  "num_nextn_predict_layers": 1,
+  "pretraining_tp": 1,
+  "q_lora_rank": 1536,
+  "qk_nope_head_dim": 128,
+  "qk_rope_head_dim": 64,
+  "quantization_config": {
+    "activation_scheme": "dynamic",
+    "fmt": "e4m3",
+    "quant_method": "fp8",
+    "weight_block_size": [
+      128,
+      128
+    ]
+  },
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": {
+    "beta_fast": 32,
+    "beta_slow": 1,
+    "factor": 40,
+    "mscale": 1.0,
+    "mscale_all_dim": 1.0,
+    "original_max_position_embeddings": 4096,
+    "type": "yarn"
+  },
+  "rope_theta": 10000,
+  "routed_scaling_factor": 2.5,
+  "scoring_func": "sigmoid",
+  "seq_aux": true,
+  "tie_word_embeddings": false,
+  "topk_group": 4,
+  "topk_method": "noaux_tc",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.46.3",
+  "use_cache": true,
+  "v_head_dim": 128,
+  "vocab_size": 129280
+}
config.json ADDED
@@ -0,0 +1,70 @@
+{
+  "architectures": [
+    "DeepseekV3ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "configuration_deepseek.DeepseekV3Config",
+    "AutoModel": "modeling_deepseek.DeepseekV3Model",
+    "AutoModelForCausalLM": "modeling_deepseek.DeepseekV3ForCausalLM"
+  },
+  "aux_loss_alpha": 0.001,
+  "bos_token_id": 0,
+  "eos_token_id": 1,
+  "ep_size": 1,
+  "first_k_dense_replace": 3,
+  "hidden_act": "silu",
+  "hidden_size": 7168,
+  "initializer_range": 0.02,
+  "intermediate_size": 18432,
+  "kv_lora_rank": 512,
+  "max_position_embeddings": 163840,
+  "model_type": "deepseek_v3",
+  "moe_intermediate_size": 2048,
+  "moe_layer_freq": 1,
+  "n_group": 8,
+  "n_routed_experts": 256,
+  "n_shared_experts": 1,
+  "norm_topk_prob": true,
+  "num_attention_heads": 128,
+  "num_experts_per_tok": 8,
+  "num_hidden_layers": 61,
+  "num_key_value_heads": 128,
+  "num_nextn_predict_layers": 1,
+  "pretraining_tp": 1,
+  "q_lora_rank": 1536,
+  "qk_nope_head_dim": 128,
+  "qk_rope_head_dim": 64,
+  "quantization_config": {
+    "activation_scheme": "dynamic",
+    "fmt": "e4m3",
+    "quant_method": "fp8",
+    "weight_block_size": [
+      128,
+      128
+    ]
+  },
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": {
+    "beta_fast": 32,
+    "beta_slow": 1,
+    "factor": 40,
+    "mscale": 1.0,
+    "mscale_all_dim": 1.0,
+    "original_max_position_embeddings": 4096,
+    "type": "yarn"
+  },
+  "rope_theta": 10000,
+  "routed_scaling_factor": 2.5,
+  "scoring_func": "sigmoid",
+  "seq_aux": true,
+  "tie_word_embeddings": false,
+  "topk_group": 4,
+  "topk_method": "noaux_tc",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.46.3",
+  "use_cache": true,
+  "v_head_dim": 128,
+  "vocab_size": 129280
+}
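
The `auto_map` block above points `AutoConfig` and `AutoModelForCausalLM` at Python files shipped inside the repo, so loading through transformers needs `trust_remote_code=True`. A minimal sketch, assuming a local clone of this repo (the path is illustrative):

    from transformers import AutoConfig

    # auto_map routes AutoConfig to configuration_deepseek.DeepseekV3Config in this
    # folder, which is why trust_remote_code must be enabled.
    config = AutoConfig.from_pretrained("./DeepSeek-V3", trust_remote_code=True)  # hypothetical local path
    print(config.num_hidden_layers, config.n_routed_experts, config.num_experts_per_tok)  # 61 256 8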
configuration_deepseek.py ADDED
@@ -0,0 +1,210 @@
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+logger = logging.get_logger(__name__)
+
+DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class DeepseekV3Config(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate a DeepSeek
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a configuration similar to that of DeepSeek-V3.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 129280):
+            Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`DeepseekV3Model`].
+        hidden_size (`int`, *optional*, defaults to 7168):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 18432):
+            Dimension of the MLP representations.
+        moe_intermediate_size (`int`, *optional*, defaults to 2048):
+            Dimension of the MoE representations.
+        num_hidden_layers (`int`, *optional*, defaults to 61):
+            Number of hidden layers in the Transformer decoder.
+        num_nextn_predict_layers (`int`, *optional*, defaults to 1):
+            Number of next-n prediction layers in the DeepSeekV3 model.
+        num_attention_heads (`int`, *optional*, defaults to 128):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        n_shared_experts (`int`, *optional*, defaults to 1):
+            Number of shared experts; `None` means a dense model.
+        n_routed_experts (`int`, *optional*, defaults to 256):
+            Number of routed experts; `None` means a dense model.
+        routed_scaling_factor (`float`, *optional*, defaults to 2.5):
+            Scaling factor for routed experts.
+        topk_method (`str`, *optional*, defaults to `'noaux_tc'`):
+            Top-k method used in the routing gate.
+        n_group (`int`, *optional*, defaults to 8):
+            Number of groups for routed experts.
+        topk_group (`int`, *optional*, defaults to 4):
+            Number of selected groups for each token (the experts selected for each token are restricted to these groups).
+        num_experts_per_tok (`int`, *optional*, defaults to 8):
+            Number of selected experts; `None` means a dense model.
+        moe_layer_freq (`int`, *optional*, defaults to 1):
+            The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
+        first_k_dense_replace (`int`, *optional*, defaults to 3):
+            Number of dense layers at the bottom of the model
+            (embed -> dense -> dense -> ... -> dense -> moe -> moe ... -> lm_head).
+        norm_topk_prob (`bool`, *optional*, defaults to `True`):
+            Whether to normalize the weights of the routed experts.
+        scoring_func (`str`, *optional*, defaults to `'sigmoid'`):
+            Method of computing expert weights.
+        aux_loss_alpha (`float`, *optional*, defaults to 0.001):
+            Auxiliary loss weight coefficient.
+        seq_aux (`bool`, *optional*, defaults to `True`):
+            Whether to compute the auxiliary loss for each individual sample.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by mean-pooling all the original heads within that group. For more details, check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+            `num_attention_heads`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to 4096):
+            The maximum sequence length that this model might ever be used with.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            Padding token id.
+        bos_token_id (`int`, *optional*, defaults to 0):
+            Beginning of stream token id.
+        eos_token_id (`int`, *optional*, defaults to 1):
+            End of stream token id.
+        pretraining_tp (`int`, *optional*, defaults to 1):
+            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+            issue](https://github.com/pytorch/pytorch/issues/76232).
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether to tie the word embeddings.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        rope_scaling (`Dict`, *optional*):
+            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+            `max_position_embeddings` to the expected new maximum.
+        attention_bias (`bool`, *optional*, defaults to `False`):
+            Whether to use a bias in the query, key, value and output projection layers during self-attention.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+
+    ```python
+    >>> from transformers import DeepseekV3Model, DeepseekV3Config
+
+    >>> # Initializing a Deepseek-V3 style configuration
+    >>> configuration = DeepseekV3Config()
+
+    >>> # Initializing a model from that configuration
+    >>> model = DeepseekV3Model(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "deepseek_v3"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=129280,
+        hidden_size=7168,
+        intermediate_size=18432,
+        moe_intermediate_size=2048,
+        num_hidden_layers=61,
+        num_nextn_predict_layers=1,
+        num_attention_heads=128,
+        num_key_value_heads=128,
+        n_shared_experts=1,
+        n_routed_experts=256,
+        ep_size=1,
+        routed_scaling_factor=2.5,
+        kv_lora_rank=512,
+        q_lora_rank=1536,
+        qk_rope_head_dim=64,
+        v_head_dim=128,
+        qk_nope_head_dim=128,
+        topk_method='noaux_tc',
+        n_group=8,
+        topk_group=4,
+        num_experts_per_tok=8,
+        moe_layer_freq=1,
+        first_k_dense_replace=3,
+        norm_topk_prob=True,
+        scoring_func='sigmoid',
+        aux_loss_alpha=0.001,
+        seq_aux=True,
+        hidden_act="silu",
+        max_position_embeddings=4096,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=0,
+        eos_token_id=1,
+        pretraining_tp=1,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        rope_scaling=None,
+        attention_bias=False,
+        attention_dropout=0.0,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.moe_intermediate_size = moe_intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_nextn_predict_layers = num_nextn_predict_layers
+        self.num_attention_heads = num_attention_heads
+        self.n_shared_experts = n_shared_experts
+        self.n_routed_experts = n_routed_experts
+        self.ep_size = ep_size
+        self.routed_scaling_factor = routed_scaling_factor
+        self.kv_lora_rank = kv_lora_rank
+        self.q_lora_rank = q_lora_rank
+        self.qk_rope_head_dim = qk_rope_head_dim
+        self.v_head_dim = v_head_dim
+        self.qk_nope_head_dim = qk_nope_head_dim
+        self.topk_method = topk_method
+        self.n_group = n_group
+        self.topk_group = topk_group
+        self.num_experts_per_tok = num_experts_per_tok
+        self.moe_layer_freq = moe_layer_freq
+        self.first_k_dense_replace = first_k_dense_replace
+        self.norm_topk_prob = norm_topk_prob
+        self.scoring_func = scoring_func
+        self.aux_loss_alpha = aux_loss_alpha
+        self.seq_aux = seq_aux
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.pretraining_tp = pretraining_tp
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.rope_scaling = rope_scaling
+        self.attention_bias = attention_bias
+        self.attention_dropout = attention_dropout
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
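
Since the constructor defaults above mirror config.json, the class can be instantiated standalone to inspect or tweak the architecture. A small sketch (the overridden values are illustrative, not from the repo):

    from configuration_deepseek import DeepseekV3Config

    cfg = DeepseekV3Config()  # defaults reproduce config.json
    assert cfg.n_routed_experts == 256 and cfg.num_experts_per_tok == 8

    # A scaled-down variant for experimentation; only the overridden fields change.
    tiny = DeepseekV3Config(num_hidden_layers=4, n_routed_experts=16, num_experts_per_tok=2)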
inference/.ipynb_checkpoints/convert-checkpoint.py ADDED
@@ -0,0 +1,84 @@
+import os
+import shutil
+from argparse import ArgumentParser
+from glob import glob
+from tqdm import tqdm, trange
+
+import torch
+from safetensors.torch import safe_open, save_file
+
+
+# Map Hugging Face parameter names to the inference engine's names, paired with
+# the dimension along which each tensor is sharded for model parallelism
+# (None means the tensor is replicated on every rank).
+mapping = {
+    "embed_tokens": ("embed", 0),
+    "input_layernorm": ("attn_norm", None),
+    "post_attention_layernorm": ("ffn_norm", None),
+    "q_proj": ("wq", 0),
+    "q_a_proj": ("wq_a", None),
+    "q_a_layernorm": ("q_norm", None),
+    "q_b_proj": ("wq_b", 0),
+    "kv_a_proj_with_mqa": ("wkv_a", None),
+    "kv_a_layernorm": ("kv_norm", None),
+    "kv_b_proj": ("wkv_b", 0),
+    "o_proj": ("wo", 1),
+    "gate": ("gate", None),
+    "gate_proj": ("w1", 0),
+    "down_proj": ("w2", 1),
+    "up_proj": ("w3", 0),
+    "norm": ("norm", None),
+    "lm_head": ("head", 0),
+    "scale": ("scale", None),
+}
+
+
+def main(hf_ckpt_path, save_path, n_experts, mp):
+    torch.set_num_threads(8)
+    n_local_experts = n_experts // mp
+    state_dicts = [{} for _ in range(mp)]
+
+    for file_path in tqdm(glob(os.path.join(hf_ckpt_path, "*.safetensors"))):
+        with safe_open(file_path, framework="pt", device="cpu") as f:
+            for name in f.keys():
+                if "model.layers.61" in name:  # skip the extra multi-token-prediction layer
+                    continue
+                param: torch.Tensor = f.get_tensor(name)
+                if name.startswith("model."):
+                    name = name[len("model."):]
+                name = name.replace("self_attn", "attn")
+                name = name.replace("mlp", "ffn")
+                name = name.replace("weight_scale_inv", "scale")
+                name = name.replace("e_score_correction_bias", "bias")
+                key = name.split(".")[-2]
+                assert key in mapping
+                new_key, dim = mapping[key]
+                name = name.replace(key, new_key)
+                for i in range(mp):
+                    new_param = param
+                    if "experts" in name and "shared_experts" not in name:
+                        # Routed experts are partitioned across ranks, not sliced.
+                        idx = int(name.split(".")[-3])
+                        if idx < i * n_local_experts or idx >= (i + 1) * n_local_experts:
+                            continue
+                    elif dim is not None:
+                        assert param.size(dim) % mp == 0
+                        shard_size = param.size(dim) // mp
+                        new_param = param.narrow(dim, i * shard_size, shard_size).contiguous()
+                    state_dicts[i][name] = new_param
+
+    os.makedirs(save_path, exist_ok=True)
+
+    for i in trange(mp):
+        save_file(state_dicts[i], os.path.join(save_path, f"model{i}-mp{mp}.safetensors"))
+
+    # Copy tokenizer files alongside the converted weights.
+    for file_path in glob(os.path.join(hf_ckpt_path, "*token*")):
+        new_file_path = os.path.join(save_path, os.path.basename(file_path))
+        shutil.copyfile(file_path, new_file_path)
+
+
+if __name__ == "__main__":
+    parser = ArgumentParser()
+    parser.add_argument("--hf-ckpt-path", type=str, required=True)
+    parser.add_argument("--save-path", type=str, required=True)
+    parser.add_argument("--n-experts", type=int, required=True)
+    parser.add_argument("--model-parallel", type=int, default=1)
+    args = parser.parse_args()
+    assert args.n_experts % args.model_parallel == 0
+    main(args.hf_ckpt_path, args.save_path, args.n_experts, args.model_parallel)
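
Going by the argparse flags above, converting this checkpoint (256 routed experts) for an 8-way model-parallel run might look like `python convert.py --hf-ckpt-path /path/to/DeepSeek-V3 --save-path /path/to/DeepSeek-V3-mp8 --n-experts 256 --model-parallel 8` (paths illustrative); the final assert requires `--n-experts` to be divisible by `--model-parallel`.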
inference/.ipynb_checkpoints/fp8_cast_bf16-checkpoint.py ADDED
@@ -0,0 +1,81 @@
+import os
+import json
+from argparse import ArgumentParser
+from glob import glob
+from tqdm import tqdm
+
+import torch
+from safetensors.torch import load_file, save_file
+
+from kernel import weight_dequant
+
+
+def main(fp8_path, bf16_path):
+    torch.set_default_dtype(torch.bfloat16)
+    os.makedirs(bf16_path, exist_ok=True)
+    model_index_file = os.path.join(fp8_path, "model.safetensors.index.json")
+    with open(model_index_file, "r") as f:
+        model_index = json.load(f)
+    weight_map = model_index["weight_map"]
+
+    # Cache for loaded safetensors files
+    loaded_files = {}
+    fp8_weight_names = []
+
+    # Helper function to get a tensor from the correct file
+    def get_tensor(tensor_name):
+        file_name = weight_map[tensor_name]
+        if file_name not in loaded_files:
+            file_path = os.path.join(fp8_path, file_name)
+            loaded_files[file_name] = load_file(file_path, device="cuda")
+        return loaded_files[file_name][tensor_name]
+
+    safetensor_files = list(glob(os.path.join(fp8_path, "*.safetensors")))
+    safetensor_files.sort()
+    for safetensor_file in tqdm(safetensor_files):
+        file_name = os.path.basename(safetensor_file)
+        current_state_dict = load_file(safetensor_file, device="cuda")
+        loaded_files[file_name] = current_state_dict
+
+        new_state_dict = {}
+        for weight_name, weight in current_state_dict.items():
+            if weight_name.endswith("_scale_inv"):
+                continue  # scales are consumed during dequantization, not copied
+            elif weight.element_size() == 1:  # FP8 weight (1 byte per element)
+                scale_inv_name = f"{weight_name}_scale_inv"
+                try:
+                    # Get scale_inv from the correct file
+                    scale_inv = get_tensor(scale_inv_name)
+                    fp8_weight_names.append(weight_name)
+                    new_state_dict[weight_name] = weight_dequant(weight, scale_inv)
+                except KeyError:
+                    print(f"Warning: Missing scale_inv tensor for {weight_name}, skipping conversion")
+                    new_state_dict[weight_name] = weight
+            else:
+                new_state_dict[weight_name] = weight
+
+        new_safetensor_file = os.path.join(bf16_path, file_name)
+        save_file(new_state_dict, new_safetensor_file)
+
+        # Memory management: keep only the 2 most recently loaded files
+        # (dicts preserve insertion order, so the first key is the oldest).
+        if len(loaded_files) > 2:
+            oldest_file = next(iter(loaded_files))
+            del loaded_files[oldest_file]
+            torch.cuda.empty_cache()
+
+    # Update the model index: dequantized weights no longer need scale tensors.
+    new_model_index_file = os.path.join(bf16_path, "model.safetensors.index.json")
+    for weight_name in fp8_weight_names:
+        scale_inv_name = f"{weight_name}_scale_inv"
+        if scale_inv_name in weight_map:
+            weight_map.pop(scale_inv_name)
+    with open(new_model_index_file, "w") as f:
+        json.dump({"metadata": {}, "weight_map": weight_map}, f, indent=2)
+
+
+if __name__ == "__main__":
+    parser = ArgumentParser()
+    parser.add_argument("--input-fp8-hf-path", type=str, required=True)
+    parser.add_argument("--output-bf16-hf-path", type=str, required=True)
+    args = parser.parse_args()
+    main(args.input_fp8_hf_path, args.output_bf16_hf_path)
+
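
A hypothetical invocation, given the flags above: `python fp8_cast_bf16.py --input-fp8-hf-path /path/to/DeepSeek-V3 --output-bf16-hf-path /path/to/DeepSeek-V3-bf16`. The script loads shards onto the GPU for dequantization and keeps at most two in memory, so it assumes a CUDA device with room for a couple of shards at a time.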
inference/.ipynb_checkpoints/generate-checkpoint.py ADDED
@@ -0,0 +1,137 @@
+import os
+import json
+from argparse import ArgumentParser
+from typing import List
+
+import torch
+import torch.distributed as dist
+from transformers import AutoTokenizer
+from safetensors.torch import load_model
+
+from model import Transformer, ModelArgs
+
+
+def sample(logits, temperature: float = 1.0):
+    logits = logits / max(temperature, 1e-5)
+    probs = torch.softmax(logits, dim=-1)
+    # Gumbel-max-style trick: dividing probabilities by i.i.d. Exponential(1)
+    # noise and taking the argmax draws a sample from the categorical distribution.
+    return probs.div_(torch.empty_like(probs).exponential_(1)).argmax(dim=-1)
+
+
+@torch.inference_mode()
+def generate(
+    model: Transformer,
+    prompt_tokens: List[List[int]],
+    max_new_tokens: int,
+    eos_id: int,
+    temperature: float = 1.0
+) -> List[List[int]]:
+    prompt_lens = [len(t) for t in prompt_tokens]
+    assert max(prompt_lens) <= model.max_seq_len
+    total_len = min(model.max_seq_len, max_new_tokens + max(prompt_lens))
+    tokens = torch.full((len(prompt_tokens), total_len), -1, dtype=torch.long, device="cuda")
+    for i, t in enumerate(prompt_tokens):
+        tokens[i, :len(t)] = torch.tensor(t, dtype=torch.long, device="cuda")
+    prev_pos = 0
+    finished = torch.tensor([False] * len(prompt_tokens), device="cuda")
+    prompt_mask = tokens != -1
+    for cur_pos in range(min(prompt_lens), total_len):
+        logits = model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
+        if temperature > 0:
+            next_token = sample(logits, temperature)
+        else:
+            next_token = logits.argmax(dim=-1)
+        # Within a still-running prompt, keep the prompt token instead of the sample.
+        next_token = torch.where(prompt_mask[:, cur_pos], tokens[:, cur_pos], next_token)
+        tokens[:, cur_pos] = next_token
+        finished |= torch.logical_and(~prompt_mask[:, cur_pos], next_token == eos_id)
+        prev_pos = cur_pos
+        if finished.all():
+            break
+    completion_tokens = []
+    for i, toks in enumerate(tokens.tolist()):
+        toks = toks[prompt_lens[i]:prompt_lens[i]+max_new_tokens]
+        if eos_id in toks:
+            toks = toks[:toks.index(eos_id)]
+        completion_tokens.append(toks)
+    return completion_tokens
+
+
+def main(
+    ckpt_path: str,
+    config: str,
+    input_file: str = "",
+    interactive: bool = True,
+    max_new_tokens: int = 100,
+    temperature: float = 1.0,
+) -> None:
+    world_size = int(os.getenv("WORLD_SIZE", "1"))
+    rank = int(os.getenv("RANK", "0"))
+    local_rank = int(os.getenv("LOCAL_RANK", "0"))
+    if world_size > 1:
+        dist.init_process_group("nccl")
+    global print
+    if rank != 0:
+        print = lambda *_, **__: None  # silence all ranks except rank 0
+    torch.cuda.set_device(local_rank)
+    torch.set_default_dtype(torch.bfloat16)
+    torch.set_num_threads(8)
+    torch.manual_seed(965)
+    with open(config) as f:
+        args = ModelArgs(**json.load(f))
+    print(args)
+    with torch.device("cuda"):
+        model = Transformer(args)
+    tokenizer = AutoTokenizer.from_pretrained(ckpt_path)
+    # Warm-up generation with uninitialized weights (e.g., to trigger kernel
+    # compilation) before loading the checkpoint.
+    tokenizer.decode(generate(model, [tokenizer.encode("DeepSeek")], 2, -1, 1.)[0])
+    load_model(model, os.path.join(ckpt_path, f"model{rank}-mp{world_size}.safetensors"))
+
+    if interactive:
+        messages = []
+        while True:
+            if world_size == 1:
+                prompt = input(">>> ")
+            elif rank == 0:
+                prompt = input(">>> ")
+                objects = [prompt]
+                dist.broadcast_object_list(objects, 0)
+            else:
+                objects = [None]
+                dist.broadcast_object_list(objects, 0)
+                prompt = objects[0]
+            if prompt == "/exit":
+                break
+            elif prompt == "/clear":
+                messages.clear()
+                continue
+            messages.append({"role": "user", "content": prompt})
+            prompt_tokens = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
+            completion_tokens = generate(model, [prompt_tokens], max_new_tokens, tokenizer.eos_token_id, temperature)
+            completion = tokenizer.decode(completion_tokens[0], skip_special_tokens=True)
+            print(completion)
+            messages.append({"role": "assistant", "content": completion})
+    else:
+        with open(input_file) as f:
+            prompts = [line.strip() for line in f.readlines()]
+        assert len(prompts) <= args.max_batch_size
+        prompt_tokens = [tokenizer.apply_chat_template([{"role": "user", "content": prompt}], add_generation_prompt=True) for prompt in prompts]
+        completion_tokens = generate(model, prompt_tokens, max_new_tokens, tokenizer.eos_token_id, temperature)
+        completions = tokenizer.batch_decode(completion_tokens, skip_special_tokens=True)
+        for prompt, completion in zip(prompts, completions):
+            print("Prompt:", prompt)
+            print("Completion:", completion)
+            print()
+
+    if world_size > 1:
+        dist.destroy_process_group()
+
+
+if __name__ == "__main__":
+    parser = ArgumentParser()
+    parser.add_argument("--ckpt-path", type=str, required=True)
+    parser.add_argument("--config", type=str, required=True)
+    parser.add_argument("--input-file", type=str, default="")
+    parser.add_argument("--interactive", action="store_true")
+    parser.add_argument("--max-new-tokens", type=int, default=200)
+    parser.add_argument("--temperature", type=float, default=0.2)
+    args = parser.parse_args()
+    assert args.input_file or args.interactive
+    main(args.ckpt_path, args.config, args.input_file, args.interactive, args.max_new_tokens, args.temperature)
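
Since the script reads WORLD_SIZE/RANK/LOCAL_RANK from the environment, multi-GPU runs are presumably launched with a distributed launcher; an illustrative (not prescribed) invocation: `torchrun --nproc-per-node 8 generate.py --ckpt-path /path/to/DeepSeek-V3-mp8 --config configs/config_671B.json --interactive --temperature 0.7`.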
inference/.ipynb_checkpoints/kernel-checkpoint.py ADDED
@@ -0,0 +1,108 @@
+from typing import Tuple
+
+import torch
+import triton
+import triton.language as tl
+from triton import Config
+
+
+@triton.jit
+def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr):
+    pid = tl.program_id(axis=0)
+    offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
+    x = tl.load(x_ptr + offs).to(tl.float32)
+    # 448.0 is the largest finite value representable in float8_e4m3fn.
+    s = tl.max(tl.abs(x)) / 448.
+    y = x / s
+    y = y.to(y_ptr.dtype.element_ty)
+    tl.store(y_ptr + offs, y)
+    tl.store(s_ptr + pid, s)
+
+
+def act_quant(x: torch.Tensor, block_size: int = 128) -> Tuple[torch.Tensor, torch.Tensor]:
+    """Quantize `x` to FP8 in 1 x `block_size` blocks; returns (fp8 tensor, per-block scales)."""
+    assert x.is_contiguous()
+    assert x.size(-1) % block_size == 0
+    y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
+    s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32)
+    grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']), )
+    act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size)
+    return y, s
+
+
+@triton.jit
+def weight_dequant_kernel(x_ptr, s_ptr, y_ptr, M, N, BLOCK_SIZE: tl.constexpr):
+    pid_m = tl.program_id(axis=0)
+    pid_n = tl.program_id(axis=1)
+    n = tl.cdiv(N, BLOCK_SIZE)
+    offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
+    offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
+    offs = offs_m[:, None] * N + offs_n[None, :]
+    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
+    x = tl.load(x_ptr + offs, mask=mask).to(tl.float32)
+    s = tl.load(s_ptr + pid_m * n + pid_n)  # one scale per BLOCK_SIZE x BLOCK_SIZE tile
+    y = x * s
+    tl.store(y_ptr + offs, y, mask=mask)
+
+
+def weight_dequant(x: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor:
+    """Dequantize an FP8 weight matrix `x` using per-tile scales `s`."""
+    assert x.is_contiguous() and s.is_contiguous()
+    assert x.dim() == 2 and s.dim() == 2
+    M, N = x.size()
+    y = torch.empty_like(x, dtype=torch.get_default_dtype())
+    grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE']))
+    weight_dequant_kernel[grid](x, s, y, M, N, BLOCK_SIZE=block_size)
+    return y
+
+
+fp8_gemm_configs = [
+    Config({'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': 128}, num_stages=num_stages, num_warps=8)
+    for block_m in [16, 32, 64] for block_n in [32, 64, 128] for num_stages in [3, 4, 5, 6]
+]
+
+
+@triton.autotune(configs=fp8_gemm_configs, key=['N', 'K'])
+@triton.jit
+def fp8_gemm_kernel(a_ptr, b_ptr, c_ptr,
+                    a_s_ptr, b_s_ptr,
+                    M, N: tl.constexpr, K: tl.constexpr,
+                    BLOCK_SIZE_M: tl.constexpr,
+                    BLOCK_SIZE_N: tl.constexpr,
+                    BLOCK_SIZE_K: tl.constexpr):
+    pid_m = tl.program_id(axis=0)
+    pid_n = tl.program_id(axis=1)
+    k = tl.cdiv(K, BLOCK_SIZE_K)
+    offs_m = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
+    offs_n = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
+    offs_k = tl.arange(0, BLOCK_SIZE_K)
+    a_ptrs = a_ptr + offs_m[:, None] * K + offs_k[None, :]
+    b_ptrs = b_ptr + offs_n[None, :] * K + offs_k[:, None]
+    a_s_ptrs = a_s_ptr + offs_m * k
+    b_s_ptrs = b_s_ptr + (offs_n // BLOCK_SIZE_K) * k
+
+    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
+    for i in range(k):
+        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - i * BLOCK_SIZE_K, other=0.0)
+        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - i * BLOCK_SIZE_K, other=0.0)
+        a_s = tl.load(a_s_ptrs)
+        b_s = tl.load(b_s_ptrs)
+        # Rescale each partial product by the activation and weight block scales.
+        accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
+        a_ptrs += BLOCK_SIZE_K
+        b_ptrs += BLOCK_SIZE_K
+        a_s_ptrs += 1
+        b_s_ptrs += 1
+    c = accumulator.to(c_ptr.dtype.element_ty)
+    offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
+    offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
+    c_ptrs = c_ptr + offs_m[:, None] * N + offs_n[None, :]
+    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
+    tl.store(c_ptrs, c, mask=mask)
+
+
+def fp8_gemm(a: torch.Tensor, a_s: torch.Tensor, b: torch.Tensor, b_s: torch.Tensor):
+    """Block-scaled FP8 GEMM computing `a @ b.T` with per-block scales `a_s` and `b_s`."""
+    assert a.is_contiguous() and b.is_contiguous()
+    assert a_s.is_contiguous() and b_s.is_contiguous()
+    K = a.size(-1)
+    M = a.numel() // K
+    N = b.size(0)
+    c = a.new_empty(*a.size()[:-1], N, dtype=torch.get_default_dtype())
+    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']), triton.cdiv(N, META['BLOCK_SIZE_N']))
+    fp8_gemm_kernel[grid](a, b, c, a_s, b_s, M, N, K)
+    return c
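
To make the block-scale convention concrete, here is a small round-trip sketch (not part of the commit; it assumes a CUDA device, Triton, and a PyTorch build with float8 support): act_quant scales each 1x128 block so its max magnitude maps to 448, the largest finite float8_e4m3fn value, and multiplying back by the stored scales recovers the input up to FP8 rounding.

    import torch
    from kernel import act_quant

    x = torch.randn(4, 1024, dtype=torch.bfloat16, device="cuda")
    y, s = act_quant(x)  # y: float8_e4m3fn, s: float32 scales with shape (4, 8)

    # Undo the per-block scaling by hand: one scale per 128-wide block.
    x_hat = (y.float().view(4, 8, 128) * s[..., None]).view_as(x)
    print((x.float() - x_hat).abs().max())  # small quantization error expected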
inference/.ipynb_checkpoints/model-checkpoint.py ADDED
@@ -0,0 +1,421 @@
+import math
+from dataclasses import dataclass
+from typing import Tuple, Optional, Literal
+
+import torch
+from torch import nn
+import torch.nn.functional as F
+import torch.distributed as dist
+
+from kernel import act_quant, weight_dequant, fp8_gemm
+
+
+world_size = 1
+rank = 0
+block_size = 128
+gemm_impl: Literal["bf16", "fp8"] = "bf16"
+attn_impl: Literal["naive", "absorb"] = "absorb"
+
+
+@dataclass
+class ModelArgs:
+    max_batch_size: int = 8
+    max_seq_len: int = 4096 * 4
+    dtype: Literal["bf16", "fp8"] = "bf16"
+    vocab_size: int = 102400
+    dim: int = 2048
+    inter_dim: int = 10944
+    moe_inter_dim: int = 1408
+    n_layers: int = 27
+    n_dense_layers: int = 1
+    n_heads: int = 16
+    # moe
+    n_routed_experts: int = 64
+    n_shared_experts: int = 2
+    n_activated_experts: int = 6
+    n_expert_groups: int = 1
+    n_limited_groups: int = 1
+    score_func: Literal["softmax", "sigmoid"] = "softmax"
+    route_scale: float = 1.
+    # mla
+    q_lora_rank: int = 0
+    kv_lora_rank: int = 512
+    qk_nope_head_dim: int = 128
+    qk_rope_head_dim: int = 64
+    v_head_dim: int = 128
+    # yarn
+    original_seq_len: int = 4096
+    rope_theta: float = 10000.0
+    rope_factor: float = 40
+    beta_fast: int = 32
+    beta_slow: int = 1
+    mscale: float = 1.
+
+
+class ParallelEmbedding(nn.Module):
+    """Embedding layer with the vocabulary partitioned across model-parallel ranks."""
+
+    def __init__(self, vocab_size: int, dim: int):
+        super().__init__()
+        self.vocab_size = vocab_size
+        self.dim = dim
+        assert vocab_size % world_size == 0
+        self.part_vocab_size = (vocab_size // world_size)
+        self.vocab_start_idx = rank * self.part_vocab_size
+        self.vocab_end_idx = self.vocab_start_idx + self.part_vocab_size
+        self.weight = nn.Parameter(torch.empty(self.part_vocab_size, self.dim))
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        if world_size > 1:
+            # Zero out tokens owned by other ranks; the all-reduce sums the shards.
+            mask = (x < self.vocab_start_idx) | (x >= self.vocab_end_idx)
+            x = x - self.vocab_start_idx
+            x[mask] = 0
+        y = F.embedding(x, self.weight)
+        if world_size > 1:
+            y[mask] = 0
+            dist.all_reduce(y)
+        return y
+
+
+def linear(x: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None) -> torch.Tensor:
+    if weight.element_size() > 1:
+        return F.linear(x, weight, bias)
+    elif gemm_impl == "bf16":
+        # FP8 weight, bf16 GEMM: dequantize the weight first.
+        weight = weight_dequant(weight, weight.scale)
+        return F.linear(x, weight, bias)
+    else:
+        # FP8 GEMM: quantize the activations on the fly.
+        x, scale = act_quant(x, block_size)
+        y = fp8_gemm(x, scale, weight, weight.scale)
+        if bias is not None:
+            y += bias
+        return y
+
+
+class Linear(nn.Module):
+    dtype = torch.bfloat16
+
+    def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
+        super().__init__()
+        self.in_features = in_features
+        self.out_features = out_features
+        self.weight = nn.Parameter(torch.empty(out_features, in_features, dtype=dtype or Linear.dtype))
+        if self.weight.element_size() == 1:
+            # FP8 weights carry one float32 scale per block_size x block_size tile.
+            scale_out_features = (out_features + block_size - 1) // block_size
+            scale_in_features = (in_features + block_size - 1) // block_size
+            self.weight.scale = self.scale = nn.Parameter(torch.empty(scale_out_features, scale_in_features, dtype=torch.float32))
+        else:
+            self.register_parameter("scale", None)
+        if bias:
+            self.bias = nn.Parameter(torch.empty(out_features))
+        else:
+            self.register_parameter("bias", None)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return linear(x, self.weight, self.bias)
+
+
+class ColumnParallelLinear(Linear):
+    """Linear layer whose output features are sharded across ranks."""
+
+    def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
+        assert out_features % world_size == 0
+        self.part_out_features = out_features // world_size
+        super().__init__(in_features, self.part_out_features, bias, dtype)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        y = linear(x, self.weight, self.bias)
+        return y
+
+
+class RowParallelLinear(Linear):
+    """Linear layer whose input features are sharded across ranks; outputs are all-reduced."""
+
+    def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
+        assert in_features % world_size == 0
+        self.part_in_features = in_features // world_size
+        super().__init__(self.part_in_features, out_features, bias, dtype)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        y = linear(x, self.weight)
+        if world_size > 1:
+            dist.all_reduce(y)
+        if self.bias is not None:
+            y += self.bias
+        return y
+
+
+class RMSNorm(nn.Module):
+    def __init__(self, dim: int, eps: float = 1e-6):
+        super().__init__()
+        self.eps = eps
+        self.weight = nn.Parameter(torch.ones(dim))
+
+    def forward(self, x: torch.Tensor):
+        x = x.float()
+        y = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+        return y.type_as(self.weight) * self.weight
+
+
+def precompute_freqs_cis(args: ModelArgs) -> torch.Tensor:
+    """Precompute complex rotary embeddings, with YaRN frequency correction
+    when max_seq_len exceeds the original training length."""
+    dim = args.qk_rope_head_dim
+    seqlen = args.max_seq_len
+    beta_fast = args.beta_fast
+    beta_slow = args.beta_slow
+    base = args.rope_theta
+    factor = args.rope_factor
+
+    def find_correction_dim(num_rotations, dim, base, max_seq_len):
+        return dim * math.log(max_seq_len / (num_rotations * 2 * math.pi)) / (2 * math.log(base))
+
+    def find_correction_range(low_rot, high_rot, dim, base, max_seq_len):
+        low = math.floor(find_correction_dim(low_rot, dim, base, max_seq_len))
+        high = math.ceil(find_correction_dim(high_rot, dim, base, max_seq_len))
+        return max(low, 0), min(high, dim-1)
+
+    def linear_ramp_factor(min, max, dim):
+        if min == max:
+            max += 0.001
+        linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
+        ramp_func = torch.clamp(linear_func, 0, 1)
+        return ramp_func
+
+    freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
+    if seqlen > args.original_seq_len:
+        low, high = find_correction_range(beta_fast, beta_slow, dim, base, args.original_seq_len)
+        smooth = 1 - linear_ramp_factor(low, high, dim // 2)
+        freqs = freqs / factor * (1 - smooth) + freqs * smooth
+
+    t = torch.arange(seqlen)
+    freqs = torch.outer(t, freqs)
+    freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
+    return freqs_cis
+
+
+def apply_rotary_emb(x: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor:
+    dtype = x.dtype
+    x = torch.view_as_complex(x.float().view(*x.shape[:-1], -1, 2))
+    freqs_cis = freqs_cis.view(1, x.size(1), 1, x.size(-1))
+    y = torch.view_as_real(x * freqs_cis).flatten(3)
+    return y.to(dtype)
+
+
+class MLA(nn.Module):
+    """Multi-head Latent Attention (MLA) with low-rank query/key-value compression."""
+
+    def __init__(self, args: ModelArgs):
+        super().__init__()
+        self.dim = args.dim
+        self.n_heads = args.n_heads
+        self.n_local_heads = args.n_heads // world_size
+        self.q_lora_rank = args.q_lora_rank
+        self.kv_lora_rank = args.kv_lora_rank
+        self.qk_nope_head_dim = args.qk_nope_head_dim
+        self.qk_rope_head_dim = args.qk_rope_head_dim
+        self.qk_head_dim = args.qk_nope_head_dim + args.qk_rope_head_dim
+        self.v_head_dim = args.v_head_dim
+
+        if self.q_lora_rank == 0:
+            self.wq = ColumnParallelLinear(self.dim, self.n_heads * self.qk_head_dim)
+        else:
+            self.wq_a = Linear(self.dim, self.q_lora_rank)
+            self.q_norm = RMSNorm(self.q_lora_rank)
+            self.wq_b = ColumnParallelLinear(self.q_lora_rank, self.n_heads * self.qk_head_dim)
+        self.wkv_a = Linear(self.dim, self.kv_lora_rank + self.qk_rope_head_dim)
+        self.kv_norm = RMSNorm(self.kv_lora_rank)
+        self.wkv_b = ColumnParallelLinear(self.kv_lora_rank, self.n_heads * (self.qk_nope_head_dim + self.v_head_dim))
+        self.wo = RowParallelLinear(self.n_heads * self.v_head_dim, self.dim)
+        self.softmax_scale = self.qk_head_dim ** -0.5
+        if args.max_seq_len > args.original_seq_len:
+            mscale = 0.1 * args.mscale * math.log(args.rope_factor) + 1.0
+            self.softmax_scale = self.softmax_scale * mscale * mscale
+
+        if attn_impl == "naive":
+            # Cache full per-head keys and values.
+            self.register_buffer("k_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.qk_head_dim), persistent=False)
+            self.register_buffer("v_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.v_head_dim), persistent=False)
+        else:
+            # "absorb": cache only the compressed KV latent plus the rotary key part.
+            self.register_buffer("kv_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.kv_lora_rank), persistent=False)
+            self.register_buffer("pe_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.qk_rope_head_dim), persistent=False)
+
+    def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]):
+        bsz, seqlen, _ = x.size()
+        end_pos = start_pos + seqlen
+        if self.q_lora_rank == 0:
+            q = self.wq(x)
+        else:
+            q = self.wq_b(self.q_norm(self.wq_a(x)))
+        q = q.view(bsz, seqlen, self.n_local_heads, self.qk_head_dim)
+        q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
+        q_pe = apply_rotary_emb(q_pe, freqs_cis)
+        kv = self.wkv_a(x)
+        kv, k_pe = torch.split(kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
+        k_pe = apply_rotary_emb(k_pe.unsqueeze(2), freqs_cis)
+        if attn_impl == "naive":
+            q = torch.cat([q_nope, q_pe], dim=-1)
+            kv = self.wkv_b(self.kv_norm(kv))
+            kv = kv.view(bsz, seqlen, self.n_local_heads, self.qk_nope_head_dim + self.v_head_dim)
+            k_nope, v = torch.split(kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
+            k = torch.cat([k_nope, k_pe.expand(-1, -1, self.n_local_heads, -1)], dim=-1)
+            self.k_cache[:bsz, start_pos:end_pos] = k
+            self.v_cache[:bsz, start_pos:end_pos] = v
+            scores = torch.einsum("bshd,bthd->bsht", q, self.k_cache[:bsz, :end_pos]) * self.softmax_scale
+        else:
+            # Absorb wkv_b into the query so scores can be computed against the latent cache.
+            wkv_b = self.wkv_b.weight if self.wkv_b.scale is None else weight_dequant(self.wkv_b.weight, self.wkv_b.scale, block_size)
+            wkv_b = wkv_b.view(self.n_local_heads, -1, self.kv_lora_rank)
+            q_nope = torch.einsum("bshd,hdc->bshc", q_nope, wkv_b[:, :self.qk_nope_head_dim])
+            self.kv_cache[:bsz, start_pos:end_pos] = self.kv_norm(kv)
+            self.pe_cache[:bsz, start_pos:end_pos] = k_pe.squeeze(2)
+            scores = (torch.einsum("bshc,btc->bsht", q_nope, self.kv_cache[:bsz, :end_pos]) +
+                      torch.einsum("bshr,btr->bsht", q_pe, self.pe_cache[:bsz, :end_pos])) * self.softmax_scale
+        if mask is not None:
+            scores += mask.unsqueeze(1)
+        scores = scores.softmax(dim=-1, dtype=torch.float32).type_as(x)
+        if attn_impl == "naive":
+            x = torch.einsum("bsht,bthd->bshd", scores, self.v_cache[:bsz, :end_pos])
+        else:
+            x = torch.einsum("bsht,btc->bshc", scores, self.kv_cache[:bsz, :end_pos])
+            x = torch.einsum("bshc,hdc->bshd", x, wkv_b[:, -self.v_head_dim:])
+        x = self.wo(x.flatten(2))
+        return x
+
+
+class MLP(nn.Module):
+    def __init__(self, dim: int, inter_dim: int):
+        super().__init__()
+        self.w1 = ColumnParallelLinear(dim, inter_dim)
+        self.w2 = RowParallelLinear(inter_dim, dim)
+        self.w3 = ColumnParallelLinear(dim, inter_dim)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        # SwiGLU: silu(w1(x)) gated by w3(x).
+        return self.w2(F.silu(self.w1(x)) * self.w3(x))
+
+
+class Gate(nn.Module):
+    """MoE routing gate with optional group-limited top-k expert selection."""
+
+    def __init__(self, args: ModelArgs):
+        super().__init__()
+        self.dim = args.dim
+        self.topk = args.n_activated_experts
+        self.n_groups = args.n_expert_groups
+        self.topk_groups = args.n_limited_groups
+        self.score_func = args.score_func
+        self.route_scale = args.route_scale
+        self.weight = nn.Parameter(torch.empty(args.n_routed_experts, args.dim))
+        # The V3 (dim 7168) checkpoints carry an expert score correction bias.
+        self.bias = nn.Parameter(torch.empty(args.n_routed_experts)) if self.dim == 7168 else None
+
+    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+        scores = linear(x, self.weight)
+        if self.score_func == "softmax":
+            scores = scores.softmax(dim=-1, dtype=torch.float32)
+        else:
+            scores = scores.sigmoid()
+        original_scores = scores
+        if self.bias is not None:
+            scores = scores + self.bias
+        if self.n_groups > 1:
+            # Keep only the top-scoring expert groups, then pick top-k within them.
+            scores = scores.view(x.size(0), self.n_groups, -1)
+            if self.bias is None:
+                group_scores = scores.amax(dim=-1)
+            else:
+                group_scores = scores.topk(2, dim=-1)[0].sum(dim=-1)
+            indices = group_scores.topk(self.topk_groups, dim=-1)[1]
+            mask = torch.zeros_like(scores[..., 0]).scatter_(1, indices, True)
+            scores = (scores * mask.unsqueeze(-1)).flatten(1)
+        indices = torch.topk(scores, self.topk, dim=-1)[1]
+        weights = original_scores.gather(1, indices)
+        if self.score_func == "sigmoid":
+            weights /= weights.sum(dim=-1, keepdim=True)
+        weights *= self.route_scale
+        return weights.type_as(x), indices
+
+
+class Expert(nn.Module):
+    def __init__(self, dim: int, inter_dim: int):
+        super().__init__()
+        self.w1 = Linear(dim, inter_dim)
+        self.w2 = Linear(inter_dim, dim)
+        self.w3 = Linear(dim, inter_dim)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.w2(F.silu(self.w1(x)) * self.w3(x))
+
+
+class MoE(nn.Module):
+    def __init__(self, args: ModelArgs):
+        super().__init__()
+        self.dim = args.dim
+        assert args.n_routed_experts % world_size == 0
+        self.n_routed_experts = args.n_routed_experts
+        self.n_local_experts = args.n_routed_experts // world_size
+        self.n_activated_experts = args.n_activated_experts
+        self.experts_start_idx = rank * self.n_local_experts
+        self.experts_end_idx = self.experts_start_idx + self.n_local_experts
+        self.gate = Gate(args)
+        # Only this rank's slice of experts is instantiated; the rest are None.
+        self.experts = nn.ModuleList([Expert(args.dim, args.moe_inter_dim) if self.experts_start_idx <= i < self.experts_end_idx else None
+                                      for i in range(self.n_routed_experts)])
+        self.shared_experts = MLP(args.dim, args.n_shared_experts * args.moe_inter_dim)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        shape = x.size()
+        x = x.view(-1, self.dim)
+        weights, indices = self.gate(x)
+        y = torch.zeros_like(x)
+        counts = torch.bincount(indices.flatten(), minlength=self.n_routed_experts).tolist()
+        for i in range(self.experts_start_idx, self.experts_end_idx):
+            if counts[i] == 0:
+                continue
+            expert = self.experts[i]
+            idx, top = torch.where(indices == i)
+            y[idx] += expert(x[idx]) * weights[idx, top, None]
+        z = self.shared_experts(x)
+        if world_size > 1:
+            dist.all_reduce(y)
+        return (y + z).view(shape)
+
+
+class Block(nn.Module):
+    def __init__(self, layer_id: int, args: ModelArgs):
+        super().__init__()
+        self.attn = MLA(args)
+        # The first n_dense_layers blocks use a dense MLP; the rest use MoE.
+        self.ffn = MLP(args.dim, args.inter_dim) if layer_id < args.n_dense_layers else MoE(args)
+        self.attn_norm = RMSNorm(args.dim)
+        self.ffn_norm = RMSNorm(args.dim)
+
+    def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]) -> torch.Tensor:
+        x = x + self.attn(self.attn_norm(x), start_pos, freqs_cis, mask)
+        x = x + self.ffn(self.ffn_norm(x))
+        return x
+
+
+class Transformer(nn.Module):
+    def __init__(self, args: ModelArgs):
+        global world_size, rank
+        world_size = dist.get_world_size() if dist.is_initialized() else 1
+        rank = dist.get_rank() if dist.is_initialized() else 0
+        Linear.dtype = torch.float8_e4m3fn if args.dtype == "fp8" else torch.bfloat16
+        super().__init__()
+        self.max_seq_len = args.max_seq_len
+        self.embed = ParallelEmbedding(args.vocab_size, args.dim)
+        self.layers = torch.nn.ModuleList()
+        for layer_id in range(args.n_layers):
+            self.layers.append(Block(layer_id, args))
+        self.norm = RMSNorm(args.dim)
+        self.head = ColumnParallelLinear(args.dim, args.vocab_size, dtype=torch.get_default_dtype())
+        self.register_buffer("freqs_cis", precompute_freqs_cis(args), persistent=False)
+
+    @torch.inference_mode()
+    def forward(self, tokens: torch.Tensor, start_pos: int = 0):
+        seqlen = tokens.size(1)
+        h = self.embed(tokens)
+        freqs_cis = self.freqs_cis[start_pos:start_pos+seqlen]
+        mask = None
+        if seqlen > 1:
+            mask = torch.full((seqlen, seqlen), float("-inf"), device=tokens.device).triu_(1)
+        for layer in self.layers:
+            h = layer(h, start_pos, freqs_cis, mask)
+        # Only the last position's logits are needed for autoregressive decoding.
+        h = self.norm(h)[:, -1]
+        logits = self.head(h)
+        if world_size > 1:
+            all_logits = [torch.empty_like(logits) for _ in range(world_size)]
+            dist.all_gather(all_logits, logits)
+            logits = torch.cat(all_logits, dim=-1)
+        return logits
+
+
+if __name__ == "__main__":
+    torch.set_default_dtype(torch.bfloat16)
+    torch.set_default_device("cuda")
+    torch.manual_seed(0)
+    args = ModelArgs()
+    x = torch.randint(0, args.vocab_size, (2, 128))
+    model = Transformer(args)
+    print(model(x).size())
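
As a standalone illustration of Gate.forward under the 671B settings (sigmoid scores, 256 experts in 8 groups, topk_group 4, 8 active experts, route_scale 2.5), with random stand-in scores and the correction bias omitted:

    import torch

    scores = torch.rand(2, 256)                          # stand-in sigmoid scores for 2 tokens
    grouped = scores.view(2, 8, -1)                      # (tokens, n_groups, experts per group)
    group_scores = grouped.topk(2, dim=-1)[0].sum(-1)    # noaux_tc: each group's top-2 sum
    keep = group_scores.topk(4, dim=-1)[1]               # keep the best 4 groups per token
    mask = torch.zeros(2, 8).scatter_(1, keep, 1.0)
    indices = (grouped * mask[..., None]).flatten(1).topk(8, dim=-1)[1]
    weights = scores.gather(1, indices)
    weights = weights / weights.sum(-1, keepdim=True) * 2.5  # sigmoid path: normalize, then scale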
inference/__pycache__/kernel.cpython-310.pyc ADDED
Binary file (4.41 kB).
 
inference/configs/config_16B.json ADDED
@@ -0,0 +1,19 @@
+{
+  "vocab_size": 102400,
+  "dim": 2048,
+  "inter_dim": 10944,
+  "moe_inter_dim": 1408,
+  "n_layers": 27,
+  "n_dense_layers": 1,
+  "n_heads": 16,
+  "n_routed_experts": 64,
+  "n_shared_experts": 2,
+  "n_activated_experts": 6,
+  "route_scale": 1.0,
+  "q_lora_rank": 0,
+  "kv_lora_rank": 512,
+  "qk_nope_head_dim": 128,
+  "qk_rope_head_dim": 64,
+  "v_head_dim": 128,
+  "mscale": 0.707
+}
inference/configs/config_236B.json ADDED
@@ -0,0 +1,20 @@
+{
+  "vocab_size": 102400,
+  "dim": 5120,
+  "inter_dim": 12288,
+  "moe_inter_dim": 1536,
+  "n_layers": 60,
+  "n_dense_layers": 1,
+  "n_heads": 128,
+  "n_routed_experts": 160,
+  "n_shared_experts": 2,
+  "n_activated_experts": 6,
+  "n_expert_groups": 8,
+  "n_limited_groups": 3,
+  "route_scale": 16.0,
+  "q_lora_rank": 1536,
+  "kv_lora_rank": 512,
+  "qk_nope_head_dim": 128,
+  "qk_rope_head_dim": 64,
+  "v_head_dim": 128
+}
inference/configs/config_671B.json ADDED
@@ -0,0 +1,22 @@
+{
+  "vocab_size": 129280,
+  "dim": 7168,
+  "inter_dim": 18432,
+  "moe_inter_dim": 2048,
+  "n_layers": 61,
+  "n_dense_layers": 3,
+  "n_heads": 128,
+  "n_routed_experts": 256,
+  "n_shared_experts": 1,
+  "n_activated_experts": 8,
+  "n_expert_groups": 8,
+  "n_limited_groups": 4,
+  "route_scale": 2.5,
+  "score_func": "sigmoid",
+  "q_lora_rank": 1536,
+  "kv_lora_rank": 512,
+  "qk_nope_head_dim": 128,
+  "qk_rope_head_dim": 64,
+  "v_head_dim": 128,
+  "dtype": "fp8"
+}
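
These config files are consumed exactly the way generate.py does it: unpacked straight into ModelArgs, with absent fields keeping their dataclass defaults. A sketch (run from the inference/ directory):

    import json
    from model import ModelArgs

    with open("configs/config_671B.json") as f:
        args = ModelArgs(**json.load(f))
    assert args.dtype == "fp8" and args.n_routed_experts == 256
    print(args.max_seq_len)  # 16384: dataclass default, not set by the JSON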
inference/convert.py ADDED
@@ -0,0 +1,84 @@
+ import os
+ import shutil
+ from argparse import ArgumentParser
+ from glob import glob
+ from tqdm import tqdm, trange
+
+ import torch
+ from safetensors.torch import safe_open, save_file
+
+
+ mapping = {
+     "embed_tokens": ("embed", 0),
+     "input_layernorm": ("attn_norm", None),
+     "post_attention_layernorm": ("ffn_norm", None),
+     "q_proj": ("wq", 0),
+     "q_a_proj": ("wq_a", None),
+     "q_a_layernorm": ("q_norm", None),
+     "q_b_proj": ("wq_b", 0),
+     "kv_a_proj_with_mqa": ("wkv_a", None),
+     "kv_a_layernorm": ("kv_norm", None),
+     "kv_b_proj": ("wkv_b", 0),
+     "o_proj": ("wo", 1),
+     "gate": ("gate", None),
+     "gate_proj": ("w1", 0),
+     "down_proj": ("w2", 1),
+     "up_proj": ("w3", 0),
+     "norm": ("norm", None),
+     "lm_head": ("head", 0),
+     "scale": ("scale", None),
+ }
+
+
+ def main(hf_ckpt_path, save_path, n_experts, mp):
+     torch.set_num_threads(8)
+     n_local_experts = n_experts // mp
+     state_dicts = [{} for _ in range(mp)]
+
+     for file_path in tqdm(glob(os.path.join(hf_ckpt_path, "*.safetensors"))):
+         with safe_open(file_path, framework="pt", device="cpu") as f:
+             for name in f.keys():
+                 if "model.layers.61" in name:
+                     continue
+                 param: torch.Tensor = f.get_tensor(name)
+                 if name.startswith("model."):
+                     name = name[len("model."):]
+                 name = name.replace("self_attn", "attn")
+                 name = name.replace("mlp", "ffn")
+                 name = name.replace("weight_scale_inv", "scale")
+                 name = name.replace("e_score_correction_bias", "bias")
+                 key = name.split(".")[-2]
+                 assert key in mapping
+                 new_key, dim = mapping[key]
+                 name = name.replace(key, new_key)
+                 for i in range(mp):
+                     new_param = param
+                     if "experts" in name and "shared_experts" not in name:
+                         idx = int(name.split(".")[-3])
+                         if idx < i * n_local_experts or idx >= (i + 1) * n_local_experts:
+                             continue
+                     elif dim is not None:
+                         assert param.size(dim) % mp == 0
+                         shard_size = param.size(dim) // mp
+                         new_param = param.narrow(dim, i * shard_size, shard_size).contiguous()
+                     state_dicts[i][name] = new_param
+
+     os.makedirs(save_path, exist_ok=True)
+
+     for i in trange(mp):
+         save_file(state_dicts[i], os.path.join(save_path, f"model{i}-mp{mp}.safetensors"))
+
+     for file_path in glob(os.path.join(hf_ckpt_path, "*token*")):
+         new_file_path = os.path.join(save_path, os.path.basename(file_path))
+         shutil.copyfile(file_path, new_file_path)
+
+
+ if __name__ == "__main__":
+     parser = ArgumentParser()
+     parser.add_argument("--hf-ckpt-path", type=str, required=True)
+     parser.add_argument("--save-path", type=str, required=True)
+     parser.add_argument("--n-experts", type=int, required=True)
+     parser.add_argument("--model-parallel", type=int, default=1)
+     args = parser.parse_args()
+     assert args.n_experts % args.model_parallel == 0
+     main(args.hf_ckpt_path, args.save_path, args.n_experts, args.model_parallel)
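A hedged usage sketch of the converter (paths are placeholders): splitting the 256 routed experts of the 671B checkpoint across 8 model-parallel ranks writes one shard per rank plus copies of the tokenizer files, and each routed expert lands on exactly one rank:

    # python convert.py --hf-ckpt-path /path/to/DeepSeek-V3 --save-path /path/to/shards \
    #     --n-experts 256 --model-parallel 8
    # -> model0-mp8.safetensors ... model7-mp8.safetensors
    n_experts, mp = 256, 8
    n_local_experts = n_experts // mp       # 32 experts per rank, as in main()
    owner = lambda idx: idx // n_local_experts
    print(owner(70))                        # expert 70 is stored only on rank 2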
inference/fp8_cast_bf16.py ADDED
@@ -0,0 +1,81 @@
+ import os
+ import json
+ from argparse import ArgumentParser
+ from glob import glob
+ from tqdm import tqdm
+
+ import torch
+ from safetensors.torch import load_file, save_file
+
+ from kernel import weight_dequant
+
+ def main(fp8_path, bf16_path):
+     torch.set_default_dtype(torch.bfloat16)
+     os.makedirs(bf16_path, exist_ok=True)
+     model_index_file = os.path.join(fp8_path, "model.safetensors.index.json")
+     with open(model_index_file, "r") as f:
+         model_index = json.load(f)
+     weight_map = model_index["weight_map"]
+
+     # Cache for loaded safetensor files
+     loaded_files = {}
+     fp8_weight_names = []
+
+     # Helper function to get tensor from the correct file
+     def get_tensor(tensor_name):
+         file_name = weight_map[tensor_name]
+         if file_name not in loaded_files:
+             file_path = os.path.join(fp8_path, file_name)
+             loaded_files[file_name] = load_file(file_path, device="cuda")
+         return loaded_files[file_name][tensor_name]
+
+     safetensor_files = list(glob(os.path.join(fp8_path, "*.safetensors")))
+     safetensor_files.sort()
+     for safetensor_file in tqdm(safetensor_files):
+         file_name = os.path.basename(safetensor_file)
+         current_state_dict = load_file(safetensor_file, device="cuda")
+         loaded_files[file_name] = current_state_dict
+
+         new_state_dict = {}
+         for weight_name, weight in current_state_dict.items():
+             if weight_name.endswith("_scale_inv"):
+                 continue
+             elif weight.element_size() == 1:  # FP8 weight
+                 scale_inv_name = f"{weight_name}_scale_inv"
+                 try:
+                     # Get scale_inv from the correct file
+                     scale_inv = get_tensor(scale_inv_name)
+                     fp8_weight_names.append(weight_name)
+                     new_state_dict[weight_name] = weight_dequant(weight, scale_inv)
+                 except KeyError:
+                     print(f"Warning: Missing scale_inv tensor for {weight_name}, skipping conversion")
+                     new_state_dict[weight_name] = weight
+             else:
+                 new_state_dict[weight_name] = weight
+
+         new_safetensor_file = os.path.join(bf16_path, file_name)
+         save_file(new_state_dict, new_safetensor_file)
+
+         # Memory management: keep only the 2 most recently used files
+         if len(loaded_files) > 2:
+             oldest_file = next(iter(loaded_files))
+             del loaded_files[oldest_file]
+             torch.cuda.empty_cache()
+
+     # Update model index
+     new_model_index_file = os.path.join(bf16_path, "model.safetensors.index.json")
+     for weight_name in fp8_weight_names:
+         scale_inv_name = f"{weight_name}_scale_inv"
+         if scale_inv_name in weight_map:
+             weight_map.pop(scale_inv_name)
+     with open(new_model_index_file, "w") as f:
+         json.dump({"metadata": {}, "weight_map": weight_map}, f, indent=2)
+
+
+ if __name__ == "__main__":
+     parser = ArgumentParser()
+     parser.add_argument("--input-fp8-hf-path", type=str, required=True)
+     parser.add_argument("--output-bf16-hf-path", type=str, required=True)
+     args = parser.parse_args()
+     main(args.input_fp8_hf_path, args.output_bf16_hf_path)
+
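For intuition, a slow pure-PyTorch equivalent of the `weight_dequant` call above (a sketch assuming the 128x128 block layout used by `inference/kernel.py`, where each entry of `scale_inv` covers one tile of the FP8 weight; `weight_dequant_ref` is a hypothetical helper, not part of the repo):

    import torch

    def weight_dequant_ref(w_fp8: torch.Tensor, scale_inv: torch.Tensor, block: int = 128) -> torch.Tensor:
        # Expand the per-tile scales to elementwise scales, then rescale the FP8 values.
        M, N = w_fp8.shape
        s = scale_inv.repeat_interleave(block, dim=0)[:M]
        s = s.repeat_interleave(block, dim=1)[:, :N]
        return (w_fp8.to(torch.float32) * s).to(torch.bfloat16)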
inference/generate.py ADDED
@@ -0,0 +1,137 @@
+ import os
+ import json
+ from argparse import ArgumentParser
+ from typing import List
+
+ import torch
+ import torch.distributed as dist
+ from transformers import AutoTokenizer
+ from safetensors.torch import load_model
+
+ from model import Transformer, ModelArgs
+
+
+ def sample(logits, temperature: float = 1.0):
+     logits = logits / max(temperature, 1e-5)
+     probs = torch.softmax(logits, dim=-1)
+     return probs.div_(torch.empty_like(probs).exponential_(1)).argmax(dim=-1)
+
+
+ @torch.inference_mode()
+ def generate(
+     model: Transformer,
+     prompt_tokens: List[List[int]],
+     max_new_tokens: int,
+     eos_id: int,
+     temperature: float = 1.0
+ ) -> List[List[int]]:
+     prompt_lens = [len(t) for t in prompt_tokens]
+     assert max(prompt_lens) <= model.max_seq_len
+     total_len = min(model.max_seq_len, max_new_tokens + max(prompt_lens))
+     tokens = torch.full((len(prompt_tokens), total_len), -1, dtype=torch.long, device="cuda")
+     for i, t in enumerate(prompt_tokens):
+         tokens[i, :len(t)] = torch.tensor(t, dtype=torch.long, device="cuda")
+     prev_pos = 0
+     finished = torch.tensor([False] * len(prompt_tokens), device="cuda")
+     prompt_mask = tokens != -1
+     for cur_pos in range(min(prompt_lens), total_len):
+         logits = model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
+         if temperature > 0:
+             next_token = sample(logits, temperature)
+         else:
+             next_token = logits.argmax(dim=-1)
+         next_token = torch.where(prompt_mask[:, cur_pos], tokens[:, cur_pos], next_token)
+         tokens[:, cur_pos] = next_token
+         finished |= torch.logical_and(~prompt_mask[:, cur_pos], next_token == eos_id)
+         prev_pos = cur_pos
+         if finished.all():
+             break
+     completion_tokens = []
+     for i, toks in enumerate(tokens.tolist()):
+         toks = toks[prompt_lens[i]:prompt_lens[i]+max_new_tokens]
+         if eos_id in toks:
+             toks = toks[:toks.index(eos_id)]
+         completion_tokens.append(toks)
+     return completion_tokens
+
+
+ def main(
+     ckpt_path: str,
+     config: str,
+     input_file: str = "",
+     interactive: bool = True,
+     max_new_tokens: int = 100,
+     temperature: float = 1.0,
+ ) -> None:
+     world_size = int(os.getenv("WORLD_SIZE", "1"))
+     rank = int(os.getenv("RANK", "0"))
+     local_rank = int(os.getenv("LOCAL_RANK", "0"))
+     if world_size > 1:
+         dist.init_process_group("nccl")
+     global print
+     if rank != 0:
+         print = lambda *_, **__: None
+     torch.cuda.set_device(local_rank)
+     torch.set_default_dtype(torch.bfloat16)
+     torch.set_num_threads(8)
+     torch.manual_seed(965)
+     with open(config) as f:
+         args = ModelArgs(**json.load(f))
+     print(args)
+     with torch.device("cuda"):
+         model = Transformer(args)
+     tokenizer = AutoTokenizer.from_pretrained(ckpt_path)
+     tokenizer.decode(generate(model, [tokenizer.encode("DeepSeek")], 2, -1, 1.)[0])
+     load_model(model, os.path.join(ckpt_path, f"model{rank}-mp{world_size}.safetensors"))
+
+     if interactive:
+         messages = []
+         while True:
+             if world_size == 1:
+                 prompt = input(">>> ")
+             elif rank == 0:
+                 prompt = input(">>> ")
+                 objects = [prompt]
+                 dist.broadcast_object_list(objects, 0)
+             else:
+                 objects = [None]
+                 dist.broadcast_object_list(objects, 0)
+                 prompt = objects[0]
+             if prompt == "/exit":
+                 break
+             elif prompt == "/clear":
+                 messages.clear()
+                 continue
+             messages.append({"role": "user", "content": prompt})
+             prompt_tokens = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
+             completion_tokens = generate(model, [prompt_tokens], max_new_tokens, tokenizer.eos_token_id, temperature)
+             completion = tokenizer.decode(completion_tokens[0], skip_special_tokens=True)
+             print(completion)
+             messages.append({"role": "assistant", "content": completion})
+     else:
+         with open(input_file) as f:
+             prompts = [line.strip() for line in f.readlines()]
+         assert len(prompts) <= args.max_batch_size
+         prompt_tokens = [tokenizer.apply_chat_template([{"role": "user", "content": prompt}], add_generation_prompt=True) for prompt in prompts]
+         completion_tokens = generate(model, prompt_tokens, max_new_tokens, tokenizer.eos_token_id, temperature)
+         completions = tokenizer.batch_decode(completion_tokens, skip_special_tokens=True)
+         for prompt, completion in zip(prompts, completions):
+             print("Prompt:", prompt)
+             print("Completion:", completion)
+             print()
+
+     if world_size > 1:
+         dist.destroy_process_group()
+
+
+ if __name__ == "__main__":
+     parser = ArgumentParser()
+     parser.add_argument("--ckpt-path", type=str, required=True)
+     parser.add_argument("--config", type=str, required=True)
+     parser.add_argument("--input-file", type=str, default="")
+     parser.add_argument("--interactive", action="store_true")
+     parser.add_argument("--max-new-tokens", type=int, default=200)
+     parser.add_argument("--temperature", type=float, default=0.2)
+     args = parser.parse_args()
+     assert args.input_file or args.interactive
+     main(args.ckpt_path, args.config, args.input_file, args.interactive, args.max_new_tokens, args.temperature)
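The one-liner in `sample` is the exponential-race (equivalently, Gumbel-max) trick: with i.i.d. E_i ~ Exponential(1), argmax_i p_i / E_i is distributed as Categorical(p), so no explicit `torch.multinomial` call is needed. A quick empirical check (a sketch, not part of the repo):

    import torch

    probs = torch.tensor([0.1, 0.3, 0.6])
    draws = probs / torch.empty(100_000, 3).exponential_(1)
    freqs = torch.bincount(draws.argmax(dim=-1), minlength=3) / 100_000
    print(freqs)  # approximately tensor([0.1, 0.3, 0.6])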
inference/kernel.py ADDED
@@ -0,0 +1,108 @@
+ from typing import Tuple
+
+ import torch
+ import triton
+ import triton.language as tl
+ from triton import Config
+
+
+ @triton.jit
+ def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr):
+     pid = tl.program_id(axis=0)
+     offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
+     x = tl.load(x_ptr + offs).to(tl.float32)
+     s = tl.max(tl.abs(x)) / 448.
+     y = x / s
+     y = y.to(y_ptr.dtype.element_ty)
+     tl.store(y_ptr + offs, y)
+     tl.store(s_ptr + pid, s)
+
+
+ def act_quant(x: torch.Tensor, block_size: int = 128) -> Tuple[torch.Tensor, torch.Tensor]:
+     assert x.is_contiguous()
+     assert x.size(-1) % block_size == 0
+     y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
+     s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32)
+     grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']), )
+     act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size)
+     return y, s
+
+
+ @triton.jit
+ def weight_dequant_kernel(x_ptr, s_ptr, y_ptr, M, N, BLOCK_SIZE: tl.constexpr):
+     pid_m = tl.program_id(axis=0)
+     pid_n = tl.program_id(axis=1)
+     n = tl.cdiv(N, BLOCK_SIZE)
+     offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
+     offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
+     offs = offs_m[:, None] * N + offs_n[None, :]
+     mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
+     x = tl.load(x_ptr + offs, mask=mask).to(tl.float32)
+     s = tl.load(s_ptr + pid_m * n + pid_n)
+     y = x * s
+     tl.store(y_ptr + offs, y, mask=mask)
+
+
+ def weight_dequant(x: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor:
+     assert x.is_contiguous() and s.is_contiguous()
+     assert x.dim() == 2 and s.dim() == 2
+     M, N = x.size()
+     y = torch.empty_like(x, dtype=torch.get_default_dtype())
+     grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE']))
+     weight_dequant_kernel[grid](x, s, y, M, N, BLOCK_SIZE=block_size)
+     return y
+
+
+ fp8_gemm_configs = [
+     Config({'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': 128}, num_stages=num_stages, num_warps=8)
+     for block_m in [16, 32, 64] for block_n in [32, 64, 128] for num_stages in [3, 4, 5, 6]
+ ]
+
+ @triton.autotune(configs=fp8_gemm_configs, key=['N', 'K'])
+ @triton.jit
+ def fp8_gemm_kernel(a_ptr, b_ptr, c_ptr,
+                     a_s_ptr, b_s_ptr,
+                     M, N: tl.constexpr, K: tl.constexpr,
+                     BLOCK_SIZE_M: tl.constexpr,
+                     BLOCK_SIZE_N: tl.constexpr,
+                     BLOCK_SIZE_K: tl.constexpr):
+     pid_m = tl.program_id(axis=0)
+     pid_n = tl.program_id(axis=1)
+     k = tl.cdiv(K, BLOCK_SIZE_K)
+     offs_m = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
+     offs_n = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
+     offs_k = tl.arange(0, BLOCK_SIZE_K)
+     a_ptrs = a_ptr + offs_m[:, None] * K + offs_k[None, :]
+     b_ptrs = b_ptr + offs_n[None, :] * K + offs_k[:, None]
+     a_s_ptrs = a_s_ptr + offs_m * k
+     b_s_ptrs = b_s_ptr + (offs_n // BLOCK_SIZE_K) * k
+
+     accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
+     for i in range(k):
+         a = tl.load(a_ptrs, mask=offs_k[None, :] < K - i * BLOCK_SIZE_K, other=0.0)
+         b = tl.load(b_ptrs, mask=offs_k[:, None] < K - i * BLOCK_SIZE_K, other=0.0)
+         a_s = tl.load(a_s_ptrs)
+         b_s = tl.load(b_s_ptrs)
+         accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
+         a_ptrs += BLOCK_SIZE_K
+         b_ptrs += BLOCK_SIZE_K
+         a_s_ptrs += 1
+         b_s_ptrs += 1
+     c = accumulator.to(c_ptr.dtype.element_ty)
+     offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
+     offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
+     c_ptrs = c_ptr + offs_m[:, None] * N + offs_n[None, :]
+     mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
+     tl.store(c_ptrs, c, mask=mask)
+
+
+ def fp8_gemm(a: torch.Tensor, a_s: torch.Tensor, b: torch.Tensor, b_s: torch.Tensor):
+     assert a.is_contiguous() and b.is_contiguous()
+     assert a_s.is_contiguous() and b_s.is_contiguous()
+     K = a.size(-1)
+     M = a.numel() // K
+     N = b.size(0)
+     c = a.new_empty(*a.size()[:-1], N, dtype=torch.get_default_dtype())
+     grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']), triton.cdiv(N, META['BLOCK_SIZE_N']))
+     fp8_gemm_kernel[grid](a, b, c, a_s, b_s, M, N, K)
+     return c
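The constant 448 in `act_quant_kernel` is the largest finite value of `float8_e4m3fn`, so each 128-element group is rescaled to just fit the format. A round-trip sketch (assumes a CUDA device with Triton; the printed error bound is illustrative):

    import torch
    from kernel import act_quant

    x = torch.randn(4, 256, device="cuda", dtype=torch.bfloat16)
    y, s = act_quant(x)  # y: float8_e4m3fn values, s: one float32 scale per 128-wide group
    x_hat = y.to(torch.float32) * s.repeat_interleave(128, dim=-1)
    print((x.float() - x_hat).abs().max())  # small quantization error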
inference/model.py ADDED
@@ -0,0 +1,421 @@
+ import math
+ from dataclasses import dataclass
+ from typing import Tuple, Optional, Literal
+
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+ import torch.distributed as dist
+
+ from kernel import act_quant, weight_dequant, fp8_gemm
+
+
+ world_size = 1
+ rank = 0
+ block_size = 128
+ gemm_impl: Literal["bf16", "fp8"] = "bf16"
+ attn_impl: Literal["naive", "absorb"] = "absorb"
+
+ @dataclass
+ class ModelArgs:
+     max_batch_size: int = 8
+     max_seq_len: int = 4096 * 4
+     dtype: Literal["bf16", "fp8"] = "bf16"
+     vocab_size: int = 102400
+     dim: int = 2048
+     inter_dim: int = 10944
+     moe_inter_dim: int = 1408
+     n_layers: int = 27
+     n_dense_layers: int = 1
+     n_heads: int = 16
+     # moe
+     n_routed_experts: int = 64
+     n_shared_experts: int = 2
+     n_activated_experts: int = 6
+     n_expert_groups: int = 1
+     n_limited_groups: int = 1
+     score_func: Literal["softmax", "sigmoid"] = "softmax"
+     route_scale: float = 1.
+     # mla
+     q_lora_rank: int = 0
+     kv_lora_rank: int = 512
+     qk_nope_head_dim: int = 128
+     qk_rope_head_dim: int = 64
+     v_head_dim: int = 128
+     # yarn
+     original_seq_len: int = 4096
+     rope_theta: float = 10000.0
+     rope_factor: float = 40
+     beta_fast: int = 32
+     beta_slow: int = 1
+     mscale: float = 1.
+
+
+ class ParallelEmbedding(nn.Module):
+     def __init__(self, vocab_size: int, dim: int):
+         super().__init__()
+         self.vocab_size = vocab_size
+         self.dim = dim
+         assert vocab_size % world_size == 0
+         self.part_vocab_size = (vocab_size // world_size)
+         self.vocab_start_idx = rank * self.part_vocab_size
+         self.vocab_end_idx = self.vocab_start_idx + self.part_vocab_size
+         self.weight = nn.Parameter(torch.empty(self.part_vocab_size, self.dim))
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         if world_size > 1:
+             mask = (x < self.vocab_start_idx) | (x >= self.vocab_end_idx)
+             x = x - self.vocab_start_idx
+             x[mask] = 0
+         y = F.embedding(x, self.weight)
+         if world_size > 1:
+             y[mask] = 0
+             dist.all_reduce(y)
+         return y
+
+
+ def linear(x: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None) -> torch.Tensor:
+     if weight.element_size() > 1:
+         return F.linear(x, weight, bias)
+     elif gemm_impl == "bf16":
+         weight = weight_dequant(weight, weight.scale)
+         return F.linear(x, weight, bias)
+     else:
+         x, scale = act_quant(x, block_size)
+         y = fp8_gemm(x, scale, weight, weight.scale)
+         if bias is not None:
+             y += bias
+         return y
+
+
+ class Linear(nn.Module):
+     dtype = torch.bfloat16
+
+     def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
+         super().__init__()
+         self.in_features = in_features
+         self.out_features = out_features
+         self.weight = nn.Parameter(torch.empty(out_features, in_features, dtype=dtype or Linear.dtype))
+         if self.weight.element_size() == 1:
+             scale_out_features = (out_features + block_size - 1) // block_size
+             scale_in_features = (in_features + block_size - 1) // block_size
+             self.weight.scale = self.scale = nn.Parameter(torch.empty(scale_out_features, scale_in_features, dtype=torch.float32))
+         else:
+             self.register_parameter("scale", None)
+         if bias:
+             self.bias = nn.Parameter(torch.empty(out_features))
+         else:
+             self.register_parameter("bias", None)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return linear(x, self.weight, self.bias)
+
+
+ class ColumnParallelLinear(Linear):
+     def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
+         assert out_features % world_size == 0
+         self.part_out_features = out_features // world_size
+         super().__init__(in_features, self.part_out_features, bias, dtype)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         y = linear(x, self.weight, self.bias)
+         return y
+
+
+ class RowParallelLinear(Linear):
+     def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
+         assert in_features % world_size == 0
+         self.part_in_features = in_features // world_size
+         super().__init__(self.part_in_features, out_features, bias, dtype)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         y = linear(x, self.weight)
+         if world_size > 1:
+             dist.all_reduce(y)
+         if self.bias is not None:
+             y += self.bias
+         return y
+
+
+ class RMSNorm(nn.Module):
+     def __init__(self, dim: int, eps: float = 1e-6):
+         super().__init__()
+         self.eps = eps
+         self.weight = nn.Parameter(torch.ones(dim))
+
+     def forward(self, x: torch.Tensor):
+         x = x.float()
+         y = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+         return y.type_as(self.weight) * self.weight
+
+
+ def precompute_freqs_cis(args: ModelArgs) -> torch.Tensor:
+     dim = args.qk_rope_head_dim
+     seqlen = args.max_seq_len
+     beta_fast = args.beta_fast
+     beta_slow = args.beta_slow
+     base = args.rope_theta
+     factor = args.rope_factor
+
+     def find_correction_dim(num_rotations, dim, base, max_seq_len):
+         return dim * math.log(max_seq_len / (num_rotations * 2 * math.pi)) / (2 * math.log(base))
+
+     def find_correction_range(low_rot, high_rot, dim, base, max_seq_len):
+         low = math.floor(find_correction_dim(low_rot, dim, base, max_seq_len))
+         high = math.ceil(find_correction_dim(high_rot, dim, base, max_seq_len))
+         return max(low, 0), min(high, dim-1)
+
+     def linear_ramp_factor(min, max, dim):
+         if min == max:
+             max += 0.001
+         linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
+         ramp_func = torch.clamp(linear_func, 0, 1)
+         return ramp_func
+
+     freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
+     if seqlen > args.original_seq_len:
+         low, high = find_correction_range(beta_fast, beta_slow, dim, base, args.original_seq_len)
+         smooth = 1 - linear_ramp_factor(low, high, dim // 2)
+         freqs = freqs / factor * (1 - smooth) + freqs * smooth
+
+     t = torch.arange(seqlen)
+     freqs = torch.outer(t, freqs)
+     freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
+     return freqs_cis
+
+
+ def apply_rotary_emb(x: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor:
+     dtype = x.dtype
+     x = torch.view_as_complex(x.float().view(*x.shape[:-1], -1, 2))
+     freqs_cis = freqs_cis.view(1, x.size(1), 1, x.size(-1))
+     y = torch.view_as_real(x * freqs_cis).flatten(3)
+     return y.to(dtype)
+
+
+ class MLA(nn.Module):
+     def __init__(self, args: ModelArgs):
+         super().__init__()
+         self.dim = args.dim
+         self.n_heads = args.n_heads
+         self.n_local_heads = args.n_heads // world_size
+         self.q_lora_rank = args.q_lora_rank
+         self.kv_lora_rank = args.kv_lora_rank
+         self.qk_nope_head_dim = args.qk_nope_head_dim
+         self.qk_rope_head_dim = args.qk_rope_head_dim
+         self.qk_head_dim = args.qk_nope_head_dim + args.qk_rope_head_dim
+         self.v_head_dim = args.v_head_dim
+
+         if self.q_lora_rank == 0:
+             self.wq = ColumnParallelLinear(self.dim, self.n_heads * self.qk_head_dim)
+         else:
+             self.wq_a = Linear(self.dim, self.q_lora_rank)
+             self.q_norm = RMSNorm(self.q_lora_rank)
+             self.wq_b = ColumnParallelLinear(self.q_lora_rank, self.n_heads * self.qk_head_dim)
+         self.wkv_a = Linear(self.dim, self.kv_lora_rank + self.qk_rope_head_dim)
+         self.kv_norm = RMSNorm(self.kv_lora_rank)
+         self.wkv_b = ColumnParallelLinear(self.kv_lora_rank, self.n_heads * (self.qk_nope_head_dim + self.v_head_dim))
+         self.wo = RowParallelLinear(self.n_heads * self.v_head_dim, self.dim)
+         self.softmax_scale = self.qk_head_dim ** -0.5
+         if args.max_seq_len > args.original_seq_len:
+             mscale = 0.1 * args.mscale * math.log(args.rope_factor) + 1.0
+             self.softmax_scale = self.softmax_scale * mscale * mscale
+
+         # "naive" caches full per-head K/V; "absorb" caches only the compressed latent.
+         if attn_impl == "naive":
+             self.register_buffer("k_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.qk_head_dim), persistent=False)
+             self.register_buffer("v_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.v_head_dim), persistent=False)
+         else:
+             self.register_buffer("kv_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.kv_lora_rank), persistent=False)
+             self.register_buffer("pe_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.qk_rope_head_dim), persistent=False)
+
+     def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]):
+         bsz, seqlen, _ = x.size()
+         end_pos = start_pos + seqlen
+         if self.q_lora_rank == 0:
+             q = self.wq(x)
+         else:
+             q = self.wq_b(self.q_norm(self.wq_a(x)))
+         q = q.view(bsz, seqlen, self.n_local_heads, self.qk_head_dim)
+         q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
+         q_pe = apply_rotary_emb(q_pe, freqs_cis)
+         kv = self.wkv_a(x)
+         kv, k_pe = torch.split(kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
+         k_pe = apply_rotary_emb(k_pe.unsqueeze(2), freqs_cis)
+         if attn_impl == "naive":
+             q = torch.cat([q_nope, q_pe], dim=-1)
+             kv = self.wkv_b(self.kv_norm(kv))
+             kv = kv.view(bsz, seqlen, self.n_local_heads, self.qk_nope_head_dim + self.v_head_dim)
+             k_nope, v = torch.split(kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
+             k = torch.cat([k_nope, k_pe.expand(-1, -1, self.n_local_heads, -1)], dim=-1)
+             self.k_cache[:bsz, start_pos:end_pos] = k
+             self.v_cache[:bsz, start_pos:end_pos] = v
+             scores = torch.einsum("bshd,bthd->bsht", q, self.k_cache[:bsz, :end_pos]) * self.softmax_scale
+         else:
+             # Absorb the kv up-projection into the query so scores are computed in the latent space.
+             wkv_b = self.wkv_b.weight if self.wkv_b.scale is None else weight_dequant(self.wkv_b.weight, self.wkv_b.scale, block_size)
+             wkv_b = wkv_b.view(self.n_local_heads, -1, self.kv_lora_rank)
+             q_nope = torch.einsum("bshd,hdc->bshc", q_nope, wkv_b[:, :self.qk_nope_head_dim])
+             self.kv_cache[:bsz, start_pos:end_pos] = self.kv_norm(kv)
+             self.pe_cache[:bsz, start_pos:end_pos] = k_pe.squeeze(2)
+             scores = (torch.einsum("bshc,btc->bsht", q_nope, self.kv_cache[:bsz, :end_pos]) +
+                       torch.einsum("bshr,btr->bsht", q_pe, self.pe_cache[:bsz, :end_pos])) * self.softmax_scale
+         if mask is not None:
+             scores += mask.unsqueeze(1)
+         scores = scores.softmax(dim=-1, dtype=torch.float32).type_as(x)
+         if attn_impl == "naive":
+             x = torch.einsum("bsht,bthd->bshd", scores, self.v_cache[:bsz, :end_pos])
+         else:
+             x = torch.einsum("bsht,btc->bshc", scores, self.kv_cache[:bsz, :end_pos])
+             x = torch.einsum("bshc,hdc->bshd", x, wkv_b[:, -self.v_head_dim:])
+         x = self.wo(x.flatten(2))
+         return x
+
+
+ class MLP(nn.Module):
+     def __init__(self, dim: int, inter_dim: int):
+         super().__init__()
+         self.w1 = ColumnParallelLinear(dim, inter_dim)
+         self.w2 = RowParallelLinear(inter_dim, dim)
+         self.w3 = ColumnParallelLinear(dim, inter_dim)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return self.w2(F.silu(self.w1(x)) * self.w3(x))
+
+
+ class Gate(nn.Module):
+     def __init__(self, args: ModelArgs):
+         super().__init__()
+         self.dim = args.dim
+         self.topk = args.n_activated_experts
+         self.n_groups = args.n_expert_groups
+         self.topk_groups = args.n_limited_groups
+         self.score_func = args.score_func
+         self.route_scale = args.route_scale
+         self.weight = nn.Parameter(torch.empty(args.n_routed_experts, args.dim))
+         self.bias = nn.Parameter(torch.empty(args.n_routed_experts)) if self.dim == 7168 else None
+
+     def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+         scores = linear(x, self.weight)
+         if self.score_func == "softmax":
+             scores = scores.softmax(dim=-1, dtype=torch.float32)
+         else:
+             scores = scores.sigmoid()
+         original_scores = scores
+         if self.bias is not None:
+             scores = scores + self.bias
+         if self.n_groups > 1:
+             scores = scores.view(x.size(0), self.n_groups, -1)
+             if self.bias is None:
+                 group_scores = scores.amax(dim=-1)
+             else:
+                 group_scores = scores.topk(2, dim=-1)[0].sum(dim=-1)
+             indices = group_scores.topk(self.topk_groups, dim=-1)[1]
+             mask = torch.zeros_like(scores[..., 0]).scatter_(1, indices, True)
+             scores = (scores * mask.unsqueeze(-1)).flatten(1)
+         indices = torch.topk(scores, self.topk, dim=-1)[1]
+         weights = original_scores.gather(1, indices)
+         if self.score_func == "sigmoid":
+             weights /= weights.sum(dim=-1, keepdim=True)
+         weights *= self.route_scale
+         return weights.type_as(x), indices
+
+
+ class Expert(nn.Module):
+     def __init__(self, dim: int, inter_dim: int):
+         super().__init__()
+         self.w1 = Linear(dim, inter_dim)
+         self.w2 = Linear(inter_dim, dim)
+         self.w3 = Linear(dim, inter_dim)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return self.w2(F.silu(self.w1(x)) * self.w3(x))
+
+
+ class MoE(nn.Module):
+     def __init__(self, args: ModelArgs):
+         super().__init__()
+         self.dim = args.dim
+         assert args.n_routed_experts % world_size == 0
+         self.n_routed_experts = args.n_routed_experts
+         self.n_local_experts = args.n_routed_experts // world_size
+         self.n_activated_experts = args.n_activated_experts
+         self.experts_start_idx = rank * self.n_local_experts
+         self.experts_end_idx = self.experts_start_idx + self.n_local_experts
+         self.gate = Gate(args)
+         self.experts = nn.ModuleList([Expert(args.dim, args.moe_inter_dim) if self.experts_start_idx <= i < self.experts_end_idx else None
+                                       for i in range(self.n_routed_experts)])
+         self.shared_experts = MLP(args.dim, args.n_shared_experts * args.moe_inter_dim)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         shape = x.size()
+         x = x.view(-1, self.dim)
+         weights, indices = self.gate(x)
+         y = torch.zeros_like(x)
+         counts = torch.bincount(indices.flatten(), minlength=self.n_routed_experts).tolist()
+         for i in range(self.experts_start_idx, self.experts_end_idx):
+             if counts[i] == 0:
+                 continue
+             expert = self.experts[i]
+             idx, top = torch.where(indices == i)
+             y[idx] += expert(x[idx]) * weights[idx, top, None]
+         z = self.shared_experts(x)
+         if world_size > 1:
+             dist.all_reduce(y)
+         return (y + z).view(shape)
+
+
+ class Block(nn.Module):
+     def __init__(self, layer_id: int, args: ModelArgs):
+         super().__init__()
+         self.attn = MLA(args)
+         self.ffn = MLP(args.dim, args.inter_dim) if layer_id < args.n_dense_layers else MoE(args)
+         self.attn_norm = RMSNorm(args.dim)
+         self.ffn_norm = RMSNorm(args.dim)
+
+     def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]) -> torch.Tensor:
+         x = x + self.attn(self.attn_norm(x), start_pos, freqs_cis, mask)
+         x = x + self.ffn(self.ffn_norm(x))
+         return x
+
+
+ class Transformer(nn.Module):
+     def __init__(self, args: ModelArgs):
+         global world_size, rank
+         world_size = dist.get_world_size() if dist.is_initialized() else 1
+         rank = dist.get_rank() if dist.is_initialized() else 0
+         Linear.dtype = torch.float8_e4m3fn if args.dtype == "fp8" else torch.bfloat16
+         super().__init__()
+         self.max_seq_len = args.max_seq_len
+         self.embed = ParallelEmbedding(args.vocab_size, args.dim)
+         self.layers = torch.nn.ModuleList()
+         for layer_id in range(args.n_layers):
+             self.layers.append(Block(layer_id, args))
+         self.norm = RMSNorm(args.dim)
+         self.head = ColumnParallelLinear(args.dim, args.vocab_size, dtype=torch.get_default_dtype())
+         self.register_buffer("freqs_cis", precompute_freqs_cis(args), persistent=False)
+
+     @torch.inference_mode()
+     def forward(self, tokens: torch.Tensor, start_pos: int = 0):
+         seqlen = tokens.size(1)
+         h = self.embed(tokens)
+         freqs_cis = self.freqs_cis[start_pos:start_pos+seqlen]
+         mask = None
+         if seqlen > 1:
+             mask = torch.full((seqlen, seqlen), float("-inf"), device=tokens.device).triu_(1)
+         for layer in self.layers:
+             h = layer(h, start_pos, freqs_cis, mask)
+         h = self.norm(h)[:, -1]
+         logits = self.head(h)
+         if world_size > 1:
+             all_logits = [torch.empty_like(logits) for _ in range(world_size)]
+             dist.all_gather(all_logits, logits)
+             logits = torch.cat(all_logits, dim=-1)
+         return logits
+
+
+ if __name__ == "__main__":
+     torch.set_default_dtype(torch.bfloat16)
+     torch.set_default_device("cuda")
+     torch.manual_seed(0)
+     args = ModelArgs()
+     x = torch.randint(0, args.vocab_size, (2, 128))
+     model = Transformer(args)
+     print(model(x).size())
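A back-of-the-envelope note on why `attn_impl = "absorb"` is the default: the naive path caches full per-head keys and values, while the absorbed path caches only the compressed KV latent plus the shared rotary component, folding `wkv_b` into the score computation instead. With the 671B config at world_size 1:

    n_heads, qk_head_dim, v_head_dim = 128, 128 + 64, 128
    naive_width = n_heads * (qk_head_dim + v_head_dim)   # 40960 cached values per token
    absorb_width = 512 + 64                              # kv_lora_rank + qk_rope_head_dim = 576
    print(naive_width / absorb_width)                    # ~71x smaller KV cache per token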
inference/requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch==2.4.1
+ triton==3.0.0
+ transformers==4.46.3
+ safetensors==0.4.5
model-00001-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e87b89ead036e72853e1d7cf48136d40d85b90cc242944e58aa7f88900092f
+ size 8609454256
model-00002-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b620aa86006f54aac8d8453e9370cb8411119e3038e13df0bce82ebd954729f1
+ size 8602553952
model-00004-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a9fffda0b2c62ba57e87a9a246113fc3ceb45aa256cf9ff9fdd75dd345a2521
+ size 8598786296
model-00005-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59baf5fe1862df4db604533931b07fcb97596712a094851cf85793dc4452c268
+ size 8602554048
model-00006-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cfa5d5f02c7ab85d47e07e82cb9353232a842aad2f4747b01489956bf4845df
+ size 8741916520
model-00007-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b6f2cdd5392caaa845e140b7f178e4406043771f0d277ad1d92b5cf36a3238b
+ size 8606225096
model-00008-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdf1644b293adcf07b9d708f5d385f43575fd2fe962c6d766e48c84f1684d235
+ size 8602554144
model-00009-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b36db93b74136b8852d062c85e764ae4356c3857751c5430d3c33e01bf2fbf91
+ size 8598786392
model-00010-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff5bdffc3fca98f7c43b91b12bd0df70661b656050f0fd3ac7f7f1849d580b1a
+ size 8602553952
model-00012-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e204197e2af834ebfd4af43526914503fadfa58796b4705579314f4e9887324d
+ size 2642451624
model-00013-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d039dcb1f0f65987e36d045006f139b8d6c99fe8a9373a70c70991213cfa9f43
+ size 8598757320
model-00014-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97d7725d9ef598faba4f1284cab74b1ae32cdfd02b828079b904a52eba0d7b8e
+ size 8602554136
model-00015-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a46d3ea85ba43a66193d0f7546ec1c936a4b211fad91188ab8f43093b60f4d37
+ size 8598786408
model-00016-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6630ebc4caf694bb3918918a0a06dd00f3228c764f0725bebacc7d7c02c935e
+ size 8602553936
model-00017-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:becb73788e18f5b3ab21e4aaa0ed846791b9b062c247fed8daf48da3006e463c
+ size 8602554152
model-00018-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b9d024b0e6b87889bdae11855048dd7dc26df21bc1b354962b270cfb7ea551c
+ size 8598786312
model-00019-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a49fb1889fd754eb3db9b25dbea6ebb5f152b1f98b9374157306c258b997e540
+ size 8602554032
model-00021-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6759dceaf8e72b62eca652da8cc923102f5d00d6b2a4b17c8b2b7d582fcab5a5
+ size 8598786512
model-00022-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f73806d4cb4f80e744b6dd6a79bd97d4c9466e48a7191066cb154f19a650fee
+ size 8602554416
model-00023-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b41bc4167b28a28ebf96c29ab7d744f46398030c0de383a091c27d2b378fabb7
+ size 8598786704
model-00024-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a1e67fa44a09a698e070f0f81f637c1bca664ed40e45f24c9a307c076ae64fc
+ size 8602554224
model-00025-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40f972cced4deee0e61071ed94ce998d099083cf4be8b431efb06ab5d105e313
+ size 8602554448
model-00026-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c7bf2562f107c268579a8cdae8e87e165b0c780fd7166094d4d5632b5b04b10
+ size 8598786616
model-00027-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c72f7f985dc12fa4e9105c55eecb9be6139b78ed17713e70013af7f375d9c929
+ size 8602554312
model-00028-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea9febe137ad6c326eea88e093218e28fb22fea439319025eb89e5032af7f851
+ size 8602554448
model-00029-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51e0500521bba5dbd8faf94bc68af7b3b6197b67498cc5f078e4e2199b14a890
+ size 8598786520
model-00030-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc3a398e0e0dd6995c4731f7333830ae359882a994e1790d2045e1ff4e7681ed
+ size 8602554408
model-00031-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d81fe26a786ed8d4238b0ffd976ead048075cfd4ee5222c02b34e5727d3b373
+ size 8598786720
model-00032-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8e98fbd5a8c8ec709c86608faca725309041203a71b1ea3ee20ccac1575013a
+ size 8602554208
model-00033-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16043317a49e745e5743194920d6d5cf73472d5e7581b5197407465903525196
+ size 8602554448
model-00034-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8bdf4e8779f20854d43608277dec089d72b547c5636c5cf6e1b1cc8a3488e18d
+ size 3493899088
model-00035-of-000163.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0dc868c6f6dee0fa07b69700ff7c2669d32bbe44b4c3e809d202930f5851bbef
+ size 8598757608