Upload 9 files
- configs/config_16B.json +19 -0
- configs/config_236B.json +20 -0
- configs/config_671B.json +22 -0
- convert.py +96 -0
- fp8_cast_bf16.py +112 -0
- generate.py +185 -0
- kernel.py +191 -0
- model.py +804 -0
- requirements.txt +5 -1
configs/config_16B.json
ADDED
@@ -0,0 +1,19 @@
{
    "vocab_size": 102400,
    "dim": 2048,
    "inter_dim": 10944,
    "moe_inter_dim": 1408,
    "n_layers": 27,
    "n_dense_layers": 1,
    "n_heads": 16,
    "n_routed_experts": 64,
    "n_shared_experts": 2,
    "n_activated_experts": 6,
    "route_scale": 1.0,
    "q_lora_rank": 0,
    "kv_lora_rank": 512,
    "qk_nope_head_dim": 128,
    "qk_rope_head_dim": 64,
    "v_head_dim": 128,
    "mscale": 0.707
}
configs/config_236B.json
ADDED
@@ -0,0 +1,20 @@
{
    "vocab_size": 102400,
    "dim": 5120,
    "inter_dim": 12288,
    "moe_inter_dim": 1536,
    "n_layers": 60,
    "n_dense_layers": 1,
    "n_heads": 128,
    "n_routed_experts": 160,
    "n_shared_experts": 2,
    "n_activated_experts": 6,
    "n_expert_groups": 8,
    "n_limited_groups": 3,
    "route_scale": 16.0,
    "q_lora_rank": 1536,
    "kv_lora_rank": 512,
    "qk_nope_head_dim": 128,
    "qk_rope_head_dim": 64,
    "v_head_dim": 128
}
configs/config_671B.json
ADDED
@@ -0,0 +1,22 @@
{
    "vocab_size": 129280,
    "dim": 7168,
    "inter_dim": 18432,
    "moe_inter_dim": 2048,
    "n_layers": 61,
    "n_dense_layers": 3,
    "n_heads": 128,
    "n_routed_experts": 256,
    "n_shared_experts": 1,
    "n_activated_experts": 8,
    "n_expert_groups": 8,
    "n_limited_groups": 4,
    "route_scale": 2.5,
    "score_func": "sigmoid",
    "q_lora_rank": 1536,
    "kv_lora_rank": 512,
    "qk_nope_head_dim": 128,
    "qk_rope_head_dim": 64,
    "v_head_dim": 128,
    "dtype": "fp8"
}
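These configs map one-to-one onto the ModelArgs dataclass in model.py; generate.py builds the model by unpacking the parsed JSON into it. A minimal sketch of that loading step (the config path is a placeholder, and building the Transformer itself requires a CUDA device with the bf16 default dtype set as in generate.py):

import json
from model import ModelArgs, Transformer

# Hypothetical path; any of the three configs above is loaded the same way.
with open("configs/config_16B.json") as f:
    args = ModelArgs(**json.load(f))

print(args.dim, args.n_routed_experts)   # 2048, 64 for the 16B config
# model = Transformer(args)              # requires CUDA and torch.set_default_dtype(torch.bfloat16)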
convert.py
ADDED
@@ -0,0 +1,96 @@
import os
import shutil
from argparse import ArgumentParser
from glob import glob
from tqdm import tqdm, trange

import torch
from safetensors.torch import safe_open, save_file


mapping = {
    "embed_tokens": ("embed", 0),
    "input_layernorm": ("attn_norm", None),
    "post_attention_layernorm": ("ffn_norm", None),
    "q_proj": ("wq", 0),
    "q_a_proj": ("wq_a", None),
    "q_a_layernorm": ("q_norm", None),
    "q_b_proj": ("wq_b", 0),
    "kv_a_proj_with_mqa": ("wkv_a", None),
    "kv_a_layernorm": ("kv_norm", None),
    "kv_b_proj": ("wkv_b", 0),
    "o_proj": ("wo", 1),
    "gate": ("gate", None),
    "gate_proj": ("w1", 0),
    "down_proj": ("w2", 1),
    "up_proj": ("w3", 0),
    "norm": ("norm", None),
    "lm_head": ("head", 0),
    "scale": ("scale", None),
}


def main(hf_ckpt_path, save_path, n_experts, mp):
    """
    Converts and saves model checkpoint files into a specified format.

    Args:
        hf_ckpt_path (str): Path to the directory containing the input checkpoint files.
        save_path (str): Path to the directory where the converted checkpoint files will be saved.
        n_experts (int): Total number of experts in the model.
        mp (int): Model parallelism factor.

    Returns:
        None
    """
    torch.set_num_threads(8)
    n_local_experts = n_experts // mp
    state_dicts = [{} for _ in range(mp)]

    for file_path in tqdm(glob(os.path.join(hf_ckpt_path, "*.safetensors"))):
        with safe_open(file_path, framework="pt", device="cpu") as f:
            for name in f.keys():
                if "model.layers.61" in name:
                    continue
                param: torch.Tensor = f.get_tensor(name)
                if name.startswith("model."):
                    name = name[len("model."):]
                name = name.replace("self_attn", "attn")
                name = name.replace("mlp", "ffn")
                name = name.replace("weight_scale_inv", "scale")
                name = name.replace("e_score_correction_bias", "bias")
                key = name.split(".")[-2]
                assert key in mapping
                new_key, dim = mapping[key]
                name = name.replace(key, new_key)
                for i in range(mp):
                    new_param = param
                    if "experts" in name and "shared_experts" not in name:
                        idx = int(name.split(".")[-3])
                        if idx < i * n_local_experts or idx >= (i + 1) * n_local_experts:
                            continue
                    elif dim is not None:
                        assert param.size(dim) % mp == 0
                        shard_size = param.size(dim) // mp
                        new_param = param.narrow(dim, i * shard_size, shard_size).contiguous()
                    state_dicts[i][name] = new_param

    os.makedirs(save_path, exist_ok=True)

    for i in trange(mp):
        save_file(state_dicts[i], os.path.join(save_path, f"model{i}-mp{mp}.safetensors"))

    for file_path in glob(os.path.join(hf_ckpt_path, "*token*")):
        new_file_path = os.path.join(save_path, os.path.basename(file_path))
        shutil.copyfile(file_path, new_file_path)


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--hf-ckpt-path", type=str, required=True)
    parser.add_argument("--save-path", type=str, required=True)
    parser.add_argument("--n-experts", type=int, required=True)
    parser.add_argument("--model-parallel", type=int, required=True)
    args = parser.parse_args()
    assert args.n_experts % args.model_parallel == 0
    main(args.hf_ckpt_path, args.save_path, args.n_experts, args.model_parallel)
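For reference, a minimal sketch of invoking the converter programmatically rather than via the CLI flags above; the paths are hypothetical placeholders, and the expert count and parallelism factor must satisfy the script's assertion (n_experts divisible by mp) and match the target config, e.g. 256 routed experts for the 671B model:

from convert import main as convert_checkpoint

# Hypothetical directories; the source must hold the Hugging Face safetensors shards.
convert_checkpoint(
    hf_ckpt_path="/path/to/DeepSeek-V3",        # input HF checkpoint
    save_path="/path/to/DeepSeek-V3-Demo/mp8",  # output directory for sharded files
    n_experts=256,                               # total routed experts (from the config)
    mp=8,                                        # model-parallel factor; n_experts % mp == 0
)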
fp8_cast_bf16.py
ADDED
@@ -0,0 +1,112 @@
import os
import json
from argparse import ArgumentParser
from glob import glob
from tqdm import tqdm

import torch
from safetensors.torch import load_file, save_file

from kernel import weight_dequant

def main(fp8_path, bf16_path):
    """
    Converts FP8 weights to BF16 and saves the converted weights.

    This function reads FP8 weights from the specified directory, converts them to BF16,
    and saves the converted weights to another specified directory. It also updates the
    model index file to reflect the changes.

    Args:
        fp8_path (str): The path to the directory containing the FP8 weights and model index file.
        bf16_path (str): The path to the directory where the converted BF16 weights will be saved.

    Raises:
        KeyError: If a required scale_inv tensor is missing for a weight.

    Notes:
        - The function assumes that the FP8 weights are stored in safetensor files.
        - The function caches loaded safetensor files to optimize memory usage.
        - The function updates the model index file to remove references to scale_inv tensors.
    """
    torch.set_default_dtype(torch.bfloat16)
    os.makedirs(bf16_path, exist_ok=True)
    model_index_file = os.path.join(fp8_path, "model.safetensors.index.json")
    with open(model_index_file, "r") as f:
        model_index = json.load(f)
    weight_map = model_index["weight_map"]

    # Cache for loaded safetensor files
    loaded_files = {}
    fp8_weight_names = []

    # Helper function to get tensor from the correct file
    def get_tensor(tensor_name):
        """
        Retrieves a tensor from the cached safetensor files or loads it from disk if not cached.

        Args:
            tensor_name (str): The name of the tensor to retrieve.

        Returns:
            torch.Tensor: The retrieved tensor.

        Raises:
            KeyError: If the tensor does not exist in the safetensor file.
        """
        file_name = weight_map[tensor_name]
        if file_name not in loaded_files:
            file_path = os.path.join(fp8_path, file_name)
            loaded_files[file_name] = load_file(file_path, device="cuda")
        return loaded_files[file_name][tensor_name]

    safetensor_files = list(glob(os.path.join(fp8_path, "*.safetensors")))
    safetensor_files.sort()
    for safetensor_file in tqdm(safetensor_files):
        file_name = os.path.basename(safetensor_file)
        current_state_dict = load_file(safetensor_file, device="cuda")
        loaded_files[file_name] = current_state_dict

        new_state_dict = {}
        for weight_name, weight in current_state_dict.items():
            if weight_name.endswith("_scale_inv"):
                continue
            elif weight.element_size() == 1:  # FP8 weight
                scale_inv_name = f"{weight_name}_scale_inv"
                try:
                    # Get scale_inv from the correct file
                    scale_inv = get_tensor(scale_inv_name)
                    fp8_weight_names.append(weight_name)
                    new_state_dict[weight_name] = weight_dequant(weight, scale_inv)
                except KeyError:
                    print(f"Warning: Missing scale_inv tensor for {weight_name}, skipping conversion")
                    new_state_dict[weight_name] = weight
            else:
                new_state_dict[weight_name] = weight

        new_safetensor_file = os.path.join(bf16_path, file_name)
        save_file(new_state_dict, new_safetensor_file)

        # Memory management: keep only the 2 most recently used files
        if len(loaded_files) > 2:
            oldest_file = next(iter(loaded_files))
            del loaded_files[oldest_file]
            torch.cuda.empty_cache()

    # Update model index
    new_model_index_file = os.path.join(bf16_path, "model.safetensors.index.json")
    for weight_name in fp8_weight_names:
        scale_inv_name = f"{weight_name}_scale_inv"
        if scale_inv_name in weight_map:
            weight_map.pop(scale_inv_name)
    with open(new_model_index_file, "w") as f:
        json.dump({"metadata": {}, "weight_map": weight_map}, f, indent=2)


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--input-fp8-hf-path", type=str, required=True)
    parser.add_argument("--output-bf16-hf-path", type=str, required=True)
    args = parser.parse_args()
    main(args.input_fp8_hf_path, args.output_bf16_hf_path)
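A minimal sketch of a programmatic call, analogous to the CLI flags above; the directory names are hypothetical placeholders, and a CUDA device is required since the script loads every shard with device="cuda":

from fp8_cast_bf16 import main as cast_fp8_to_bf16

# Hypothetical directories; the input must contain model.safetensors.index.json
# plus the FP8 *.safetensors shards with their *_scale_inv tensors.
cast_fp8_to_bf16("/path/to/DeepSeek-V3-fp8", "/path/to/DeepSeek-V3-bf16")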
generate.py
ADDED
@@ -0,0 +1,185 @@
import os
import json
from argparse import ArgumentParser
from typing import List

import torch
import torch.distributed as dist
from transformers import AutoTokenizer
from safetensors.torch import load_model

from model import Transformer, ModelArgs


def sample(logits, temperature: float = 1.0):
    """
    Samples a token from the logits using temperature scaling.

    Args:
        logits (torch.Tensor): The logits tensor for token predictions.
        temperature (float, optional): Temperature for scaling logits. Defaults to 1.0.

    Returns:
        torch.Tensor: The sampled token.
    """
    logits = logits / max(temperature, 1e-5)
    probs = torch.softmax(logits, dim=-1)
    return probs.div_(torch.empty_like(probs).exponential_(1)).argmax(dim=-1)


@torch.inference_mode()
def generate(
    model: Transformer,
    prompt_tokens: List[List[int]],
    max_new_tokens: int,
    eos_id: int,
    temperature: float = 1.0
) -> List[List[int]]:
    """
    Generates new tokens based on the given prompt tokens using the specified model.

    Args:
        model (Transformer): The transformer model used for token generation.
        prompt_tokens (List[List[int]]): A list of lists containing the prompt tokens for each sequence.
        max_new_tokens (int): The maximum number of new tokens to generate.
        eos_id (int): The end-of-sequence token ID.
        temperature (float, optional): The temperature value for sampling. Defaults to 1.0.

    Returns:
        List[List[int]]: A list of lists containing the generated tokens for each sequence.
    """
    prompt_lens = [len(t) for t in prompt_tokens]
    assert max(prompt_lens) <= model.max_seq_len
    total_len = min(model.max_seq_len, max_new_tokens + max(prompt_lens))
    tokens = torch.full((len(prompt_tokens), total_len), -1, dtype=torch.long, device="cuda")
    for i, t in enumerate(prompt_tokens):
        tokens[i, :len(t)] = torch.tensor(t, dtype=torch.long, device="cuda")
    prev_pos = 0
    finished = torch.tensor([False] * len(prompt_tokens), device="cuda")
    prompt_mask = tokens != -1
    for cur_pos in range(min(prompt_lens), total_len):
        logits = model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
        if temperature > 0:
            next_token = sample(logits, temperature)
        else:
            next_token = logits.argmax(dim=-1)
        next_token = torch.where(prompt_mask[:, cur_pos], tokens[:, cur_pos], next_token)
        tokens[:, cur_pos] = next_token
        finished |= torch.logical_and(~prompt_mask[:, cur_pos], next_token == eos_id)
        prev_pos = cur_pos
        if finished.all():
            break
    completion_tokens = []
    for i, toks in enumerate(tokens.tolist()):
        toks = toks[prompt_lens[i]:prompt_lens[i]+max_new_tokens]
        if eos_id in toks:
            toks = toks[:toks.index(eos_id)]
        completion_tokens.append(toks)
    return completion_tokens


def main(
    ckpt_path: str,
    config: str,
    input_file: str = "",
    interactive: bool = True,
    max_new_tokens: int = 100,
    temperature: float = 1.0,
) -> None:
    """
    Main function to load the model and perform interactive or batch text generation.

    Args:
        ckpt_path (str): Path to the model checkpoint directory.
        config (str): Path to the model configuration file.
        input_file (str, optional): Path to a file containing input prompts. Defaults to "".
        interactive (bool, optional): Whether to run in interactive mode. Defaults to True.
        max_new_tokens (int, optional): Maximum number of new tokens to generate. Defaults to 100.
        temperature (float, optional): Temperature for sampling. Defaults to 1.0.
    """
    world_size = int(os.getenv("WORLD_SIZE", "1"))
    rank = int(os.getenv("RANK", "0"))
    local_rank = int(os.getenv("LOCAL_RANK", "0"))
    if world_size > 1:
        dist.init_process_group("nccl")
    global print
    if rank != 0:
        print = lambda *_, **__: None
    torch.cuda.set_device(local_rank)
    torch.set_default_dtype(torch.bfloat16)
    torch.set_num_threads(8)
    torch.manual_seed(965)
    with open(config) as f:
        args = ModelArgs(**json.load(f))
    print(args)
    with torch.device("cuda"):
        model = Transformer(args)
    tokenizer = AutoTokenizer.from_pretrained(ckpt_path)
    tokenizer.decode(generate(model, [tokenizer.encode("DeepSeek")], 2, -1, 1.)[0])
    load_model(model, os.path.join(ckpt_path, f"model{rank}-mp{world_size}.safetensors"))

    if interactive:
        messages = []
        while True:
            if world_size == 1:
                prompt = input(">>> ")
            elif rank == 0:
                prompt = input(">>> ")
                objects = [prompt]
                dist.broadcast_object_list(objects, 0)
            else:
                objects = [None]
                dist.broadcast_object_list(objects, 0)
                prompt = objects[0]
            if prompt == "/exit":
                break
            elif prompt == "/clear":
                messages.clear()
                continue
            messages.append({"role": "user", "content": prompt})
            prompt_tokens = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
            completion_tokens = generate(model, [prompt_tokens], max_new_tokens, tokenizer.eos_token_id, temperature)
            completion = tokenizer.decode(completion_tokens[0], skip_special_tokens=True)
            print(completion)
            messages.append({"role": "assistant", "content": completion})
    else:
        with open(input_file) as f:
            prompts = [line.strip() for line in f.readlines()]
        assert len(prompts) <= args.max_batch_size
        prompt_tokens = [tokenizer.apply_chat_template([{"role": "user", "content": prompt}], add_generation_prompt=True) for prompt in prompts]
        completion_tokens = generate(model, prompt_tokens, max_new_tokens, tokenizer.eos_token_id, temperature)
        completions = tokenizer.batch_decode(completion_tokens, skip_special_tokens=True)
        for prompt, completion in zip(prompts, completions):
            print("Prompt:", prompt)
            print("Completion:", completion)
            print()

    if world_size > 1:
        dist.destroy_process_group()


if __name__ == "__main__":
    """
    Command-line interface for distributed text generation.

    Arguments:
        --ckpt-path (str): Path to the model checkpoint directory.
        --config (str): Path to the model configuration file.
        --input-file (str, optional): File containing prompts for batch processing.
        --interactive (bool, optional): Enable interactive mode for generating text.
        --max-new-tokens (int, optional): Maximum number of new tokens to generate. Defaults to 200.
        --temperature (float, optional): Temperature for sampling. Defaults to 0.2.

    Raises:
        AssertionError: If neither input-file nor interactive mode is specified.
    """
    parser = ArgumentParser()
    parser.add_argument("--ckpt-path", type=str, required=True)
    parser.add_argument("--config", type=str, required=True)
    parser.add_argument("--input-file", type=str, default="")
    parser.add_argument("--interactive", action="store_true")
    parser.add_argument("--max-new-tokens", type=int, default=200)
    parser.add_argument("--temperature", type=float, default=0.2)
    args = parser.parse_args()
    assert args.input_file or args.interactive
    main(args.ckpt_path, args.config, args.input_file, args.interactive, args.max_new_tokens, args.temperature)
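The sampler in generate.py draws tokens with an exponential-noise (Gumbel-max style) trick: dividing the softmax probabilities by i.i.d. Exponential(1) noise and taking the argmax samples each index with probability proportional to its softmax weight. A minimal standalone sketch of that behaviour on dummy logits (no model or GPU assumed; the vocabulary and logit values are illustrative):

import torch
from collections import Counter

torch.manual_seed(0)
logits = torch.tensor([[2.0, 1.0, 0.1]])  # dummy logits for a 3-token vocabulary

def sample(logits, temperature=1.0):
    # Same sampling rule as generate.py: probs divided by Exponential(1) noise, then argmax.
    logits = logits / max(temperature, 1e-5)
    probs = torch.softmax(logits, dim=-1)
    return probs.div_(torch.empty_like(probs).exponential_(1)).argmax(dim=-1)

counts = Counter(sample(logits).item() for _ in range(10000))
print(counts)  # roughly follows softmax([2.0, 1.0, 0.1]) ≈ (0.66, 0.24, 0.10)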
kernel.py
ADDED
@@ -0,0 +1,191 @@
from typing import Tuple

import torch
import triton
import triton.language as tl
from triton import Config


@triton.jit
def act_quant_kernel(x_ptr, y_ptr, s_ptr, BLOCK_SIZE: tl.constexpr):
    """
    Quantizes the input tensor `x_ptr` and stores the result in `y_ptr` and the scaling factor in `s_ptr`.

    Args:
        x_ptr (triton.Pointer): Pointer to the input tensor.
        y_ptr (triton.Pointer): Pointer to the output tensor where quantized values will be stored.
        s_ptr (triton.Pointer): Pointer to the output tensor where scaling factors will be stored.
        BLOCK_SIZE (tl.constexpr): The size of the block to be processed by each program instance.

    Returns:
        None
    """
    pid = tl.program_id(axis=0)
    offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    x = tl.load(x_ptr + offs).to(tl.float32)
    s = tl.max(tl.abs(x)) / 448.
    y = x / s
    y = y.to(y_ptr.dtype.element_ty)
    tl.store(y_ptr + offs, y)
    tl.store(s_ptr + pid, s)


def act_quant(x: torch.Tensor, block_size: int = 128) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Quantizes the input tensor `x` using block-wise quantization.

    Args:
        x (torch.Tensor): The input tensor to be quantized. Must be contiguous and its last dimension size must be divisible by `block_size`.
        block_size (int, optional): The size of the blocks to be used for quantization. Default is 128.

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: A tuple containing:
            - The quantized tensor with dtype `torch.float8_e4m3fn`.
            - A tensor of scaling factors with dtype `torch.float32`.
    """
    assert x.is_contiguous()
    assert x.size(-1) % block_size == 0
    y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
    s = x.new_empty(*x.size()[:-1], x.size(-1) // block_size, dtype=torch.float32)
    grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']), )
    act_quant_kernel[grid](x, y, s, BLOCK_SIZE=block_size)
    return y, s


@triton.jit
def weight_dequant_kernel(x_ptr, s_ptr, y_ptr, M, N, BLOCK_SIZE: tl.constexpr):
    """
    Dequantizes weights using the provided scaling factors and stores the result.

    Args:
        x_ptr (tl.pointer): Pointer to the quantized weights.
        s_ptr (tl.pointer): Pointer to the scaling factors.
        y_ptr (tl.pointer): Pointer to the output buffer for dequantized weights.
        M (int): Number of rows in the weight matrix.
        N (int): Number of columns in the weight matrix.
        BLOCK_SIZE (tl.constexpr): Size of the block for tiling.

    Returns:
        None
    """
    pid_m = tl.program_id(axis=0)
    pid_n = tl.program_id(axis=1)
    n = tl.cdiv(N, BLOCK_SIZE)
    offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    offs_n = pid_n * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    offs = offs_m[:, None] * N + offs_n[None, :]
    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
    x = tl.load(x_ptr + offs, mask=mask).to(tl.float32)
    s = tl.load(s_ptr + pid_m * n + pid_n)
    y = x * s
    tl.store(y_ptr + offs, y, mask=mask)


def weight_dequant(x: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor:
    """
    Dequantizes the given weight tensor using the provided scale tensor.

    Args:
        x (torch.Tensor): The quantized weight tensor of shape (M, N).
        s (torch.Tensor): The scale tensor of shape (M, N).
        block_size (int, optional): The block size to use for dequantization. Defaults to 128.

    Returns:
        torch.Tensor: The dequantized weight tensor of the same shape as `x`.

    Raises:
        AssertionError: If `x` or `s` are not contiguous or if their dimensions are not 2.
    """
    assert x.is_contiguous() and s.is_contiguous()
    assert x.dim() == 2 and s.dim() == 2
    M, N = x.size()
    y = torch.empty_like(x, dtype=torch.get_default_dtype())
    grid = lambda meta: (triton.cdiv(M, meta['BLOCK_SIZE']), triton.cdiv(N, meta['BLOCK_SIZE']))
    weight_dequant_kernel[grid](x, s, y, M, N, BLOCK_SIZE=block_size)
    return y


fp8_gemm_configs = [
    Config({'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': 128}, num_stages=num_stages, num_warps=8)
    for block_m in [16, 32, 64] for block_n in [32, 64, 128] for num_stages in [3, 4, 5, 6]
]

@triton.autotune(configs=fp8_gemm_configs, key=['N', 'K'])
@triton.jit
def fp8_gemm_kernel(a_ptr, b_ptr, c_ptr,
                    a_s_ptr, b_s_ptr,
                    M, N: tl.constexpr, K: tl.constexpr,
                    BLOCK_SIZE_M: tl.constexpr,
                    BLOCK_SIZE_N: tl.constexpr,
                    BLOCK_SIZE_K: tl.constexpr):
    """
    Performs a matrix multiplication operation on FP8 matrices with scaling factors.

    Args:
        a_ptr (tl.tensor): Pointer to the first input matrix A.
        b_ptr (tl.tensor): Pointer to the second input matrix B.
        c_ptr (tl.tensor): Pointer to the output matrix C.
        a_s_ptr (tl.tensor): Pointer to the scaling factors for matrix A.
        b_s_ptr (tl.tensor): Pointer to the scaling factors for matrix B.
        M (int): Number of rows in matrix A and C.
        N (tl.constexpr): Number of columns in matrix B and C.
        K (tl.constexpr): Number of columns in matrix A and rows in matrix B.
        BLOCK_SIZE_M (tl.constexpr): Block size for the M dimension.
        BLOCK_SIZE_N (tl.constexpr): Block size for the N dimension.
        BLOCK_SIZE_K (tl.constexpr): Block size for the K dimension.

    Returns:
        None
    """
    pid_m = tl.program_id(axis=0)
    pid_n = tl.program_id(axis=1)
    k = tl.cdiv(K, BLOCK_SIZE_K)
    offs_m = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
    offs_n = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs = a_ptr + offs_m[:, None] * K + offs_k[None, :]
    b_ptrs = b_ptr + offs_n[None, :] * K + offs_k[:, None]
    a_s_ptrs = a_s_ptr + offs_m * k
    b_s_ptrs = b_s_ptr + (offs_n // BLOCK_SIZE_K) * k

    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for i in range(k):
        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - i * BLOCK_SIZE_K, other=0.0)
        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - i * BLOCK_SIZE_K, other=0.0)
        a_s = tl.load(a_s_ptrs)
        b_s = tl.load(b_s_ptrs)
        accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
        a_ptrs += BLOCK_SIZE_K
        b_ptrs += BLOCK_SIZE_K
        a_s_ptrs += 1
        b_s_ptrs += 1
    c = accumulator.to(c_ptr.dtype.element_ty)
    offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    c_ptrs = c_ptr + offs_m[:, None] * N + offs_n[None, :]
    mask = (offs_m[:, None] < M) & (offs_n[None, :] < N)
    tl.store(c_ptrs, c, mask=mask)


def fp8_gemm(a: torch.Tensor, a_s: torch.Tensor, b: torch.Tensor, b_s: torch.Tensor):
    """
    Perform a matrix multiplication using FP8 precision.

    Args:
        a (torch.Tensor): The first input matrix, must be contiguous.
        a_s (torch.Tensor): The scaling factor for the first input matrix, must be contiguous.
        b (torch.Tensor): The second input matrix, must be contiguous.
        b_s (torch.Tensor): The scaling factor for the second input matrix, must be contiguous.

    Returns:
        torch.Tensor: The result of the matrix multiplication.
    """
    assert a.is_contiguous() and b.is_contiguous()
    assert a_s.is_contiguous() and b_s.is_contiguous()
    K = a.size(-1)
    M = a.numel() // K
    N = b.size(0)
    c = a.new_empty(*a.size()[:-1], N, dtype=torch.get_default_dtype())
    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']), triton.cdiv(N, META['BLOCK_SIZE_N']))
    fp8_gemm_kernel[grid](a, b, c, a_s, b_s, M, N, K)
    return c
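For orientation, a minimal sketch of the scale layout weight_dequant consumes: one float32 scale per 128x128 tile of the weight, which is how the FP8 checkpoint stores its weight_scale_inv tensors. The shapes and the plain-PyTorch reference quantizer below are illustrative assumptions, not part of the repository, and running it requires a CUDA device with Triton and FP8 tensor support:

import torch
from kernel import weight_dequant

block = 128
M, N = 256, 512                                  # illustrative dims, multiples of the block size
torch.set_default_dtype(torch.bfloat16)
w = torch.randn(M, N, device="cuda", dtype=torch.float32)

# Reference blockwise quantization: one scale per 128x128 tile.
tiles = w.view(M // block, block, N // block, block)
scales = tiles.abs().amax(dim=(1, 3)) / 448.0    # (M//128, N//128), matches the kernel's 448 max
w_fp8 = (w / scales.repeat_interleave(block, 0).repeat_interleave(block, 1)).to(torch.float8_e4m3fn)

w_rec = weight_dequant(w_fp8.contiguous(), scales.contiguous())  # back in the default (bf16) dtype
print((w_rec.float() - w).abs().max())           # small blockwise quantization error expected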
model.py
ADDED
@@ -0,0 +1,804 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
from dataclasses import dataclass
|
3 |
+
from typing import Tuple, Optional, Literal
|
4 |
+
|
5 |
+
import torch
|
6 |
+
from torch import nn
|
7 |
+
import torch.nn.functional as F
|
8 |
+
import torch.distributed as dist
|
9 |
+
|
10 |
+
from kernel import act_quant, weight_dequant, fp8_gemm
|
11 |
+
|
12 |
+
|
13 |
+
world_size = 1
|
14 |
+
rank = 0
|
15 |
+
block_size = 128
|
16 |
+
gemm_impl: Literal["bf16", "fp8"] = "bf16"
|
17 |
+
attn_impl: Literal["naive", "absorb"] = "absorb"
|
18 |
+
|
19 |
+
@dataclass
|
20 |
+
class ModelArgs:
|
21 |
+
"""
|
22 |
+
Data class for defining model arguments and hyperparameters.
|
23 |
+
|
24 |
+
Attributes:
|
25 |
+
max_batch_size (int): Maximum batch size.
|
26 |
+
max_seq_len (int): Maximum sequence length.
|
27 |
+
dtype (Literal["bf16", "fp8"]): Data type for computations.
|
28 |
+
vocab_size (int): Vocabulary size.
|
29 |
+
dim (int): Model dimension.
|
30 |
+
inter_dim (int): Intermediate dimension for MLP layers.
|
31 |
+
moe_inter_dim (int): Intermediate dimension for MoE layers.
|
32 |
+
n_layers (int): Number of transformer layers.
|
33 |
+
n_dense_layers (int): Number of dense layers in the model.
|
34 |
+
n_heads (int): Number of attention heads.
|
35 |
+
n_routed_experts (int): Number of routed experts for MoE layers.
|
36 |
+
n_shared_experts (int): Number of shared experts for MoE layers.
|
37 |
+
n_activated_experts (int): Number of activated experts in MoE layers.
|
38 |
+
n_expert_groups (int): Number of expert groups.
|
39 |
+
n_limited_groups (int): Number of limited groups for MoE routing.
|
40 |
+
score_func (Literal["softmax", "sigmoid"]): Scoring function for MoE routing.
|
41 |
+
route_scale (float): Scaling factor for routing scores.
|
42 |
+
q_lora_rank (int): LoRA rank for query projections.
|
43 |
+
kv_lora_rank (int): LoRA rank for key-value projections.
|
44 |
+
qk_nope_head_dim (int): Dimension for query-key projections without positional embeddings.
|
45 |
+
qk_rope_head_dim (int): Dimension for query-key projections with rotary embeddings.
|
46 |
+
v_head_dim (int): Dimension for value projections.
|
47 |
+
original_seq_len (int): Original sequence length.
|
48 |
+
rope_theta (float): Base for rotary positional encoding.
|
49 |
+
rope_factor (float): Scaling factor for extended sequence lengths.
|
50 |
+
beta_fast (int): Fast beta correction factor.
|
51 |
+
beta_slow (int): Slow beta correction factor.
|
52 |
+
mscale (float): Scaling factor for extended attention.
|
53 |
+
"""
|
54 |
+
max_batch_size: int = 8
|
55 |
+
max_seq_len: int = 4096 * 4
|
56 |
+
dtype: Literal["bf16", "fp8"] = "bf16"
|
57 |
+
vocab_size: int = 102400
|
58 |
+
dim: int = 2048
|
59 |
+
inter_dim: int = 10944
|
60 |
+
moe_inter_dim: int = 1408
|
61 |
+
n_layers: int = 27
|
62 |
+
n_dense_layers: int = 1
|
63 |
+
n_heads: int = 16
|
64 |
+
# moe
|
65 |
+
n_routed_experts: int = 64
|
66 |
+
n_shared_experts: int = 2
|
67 |
+
n_activated_experts: int = 6
|
68 |
+
n_expert_groups: int = 1
|
69 |
+
n_limited_groups: int = 1
|
70 |
+
score_func: Literal["softmax", "sigmoid"] = "softmax"
|
71 |
+
route_scale: float = 1.
|
72 |
+
# mla
|
73 |
+
q_lora_rank: int = 0
|
74 |
+
kv_lora_rank: int = 512
|
75 |
+
qk_nope_head_dim: int = 128
|
76 |
+
qk_rope_head_dim: int = 64
|
77 |
+
v_head_dim: int = 128
|
78 |
+
# yarn
|
79 |
+
original_seq_len: int = 4096
|
80 |
+
rope_theta: float = 10000.0
|
81 |
+
rope_factor: float = 40
|
82 |
+
beta_fast: int = 32
|
83 |
+
beta_slow: int = 1
|
84 |
+
mscale: float = 1.
|
85 |
+
|
86 |
+
|
87 |
+
class ParallelEmbedding(nn.Module):
|
88 |
+
"""
|
89 |
+
Embedding layer with parallelism support across distributed processes.
|
90 |
+
|
91 |
+
Args:
|
92 |
+
vocab_size (int): Vocabulary size.
|
93 |
+
dim (int): Embedding dimension.
|
94 |
+
"""
|
95 |
+
def __init__(self, vocab_size: int, dim: int):
|
96 |
+
super().__init__()
|
97 |
+
self.vocab_size = vocab_size
|
98 |
+
self.dim = dim
|
99 |
+
assert vocab_size % world_size == 0
|
100 |
+
self.part_vocab_size = (vocab_size // world_size)
|
101 |
+
self.vocab_start_idx = rank * self.part_vocab_size
|
102 |
+
self.vocab_end_idx = self.vocab_start_idx + self.part_vocab_size
|
103 |
+
self.weight = nn.Parameter(torch.empty(self.part_vocab_size, self.dim))
|
104 |
+
|
105 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
106 |
+
"""
|
107 |
+
Forward pass for parallel embedding layer.
|
108 |
+
|
109 |
+
Args:
|
110 |
+
x (torch.Tensor): Input tensor containing token indices.
|
111 |
+
|
112 |
+
Returns:
|
113 |
+
torch.Tensor: Embedded representations.
|
114 |
+
|
115 |
+
Raises:
|
116 |
+
ValueError: If `world_size` is not defined.
|
117 |
+
"""
|
118 |
+
if world_size > 1:
|
119 |
+
mask = (x < self.vocab_start_idx) | (x >= self.vocab_end_idx)
|
120 |
+
x = x - self.vocab_start_idx
|
121 |
+
x[mask] = 0
|
122 |
+
y = F.embedding(x, self.weight)
|
123 |
+
if world_size > 1:
|
124 |
+
y[mask] = 0
|
125 |
+
dist.all_reduce(y)
|
126 |
+
return y
|
127 |
+
|
128 |
+
|
129 |
+
def linear(x: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None) -> torch.Tensor:
|
130 |
+
"""
|
131 |
+
Applies a linear transformation to the incoming data: y = xA^T + b.
|
132 |
+
This function supports specialized implementations based on quantization
|
133 |
+
and tensor formats.
|
134 |
+
|
135 |
+
Args:
|
136 |
+
x (torch.Tensor): The input tensor.
|
137 |
+
weight (torch.Tensor): The weight tensor. It may be quantized and
|
138 |
+
requires dequantization for certain cases.
|
139 |
+
bias (Optional[torch.Tensor]): The bias tensor to be added. Default is None.
|
140 |
+
|
141 |
+
Returns:
|
142 |
+
torch.Tensor: The result of the linear transformation, which may involve
|
143 |
+
quantization-aware computations depending on the input parameters.
|
144 |
+
|
145 |
+
Notes:
|
146 |
+
- If `weight` is quantized (e.g., `element_size() > 1`), a dequantized version
|
147 |
+
is used for computation.
|
148 |
+
- If `gemm_impl == "bf16"`, dequantization and a `bf16` GEMM operation are applied.
|
149 |
+
- For other cases, the function applies quantization to `x` and uses `fp8_gemm` for computation.
|
150 |
+
"""
|
151 |
+
if weight.element_size() > 1:
|
152 |
+
return F.linear(x, weight, bias)
|
153 |
+
elif gemm_impl == "bf16":
|
154 |
+
weight = weight_dequant(weight, weight.scale)
|
155 |
+
return F.linear(x, weight, bias)
|
156 |
+
else:
|
157 |
+
x, scale = act_quant(x, block_size)
|
158 |
+
y = fp8_gemm(x, scale, weight, weight.scale)
|
159 |
+
if bias is not None:
|
160 |
+
y += bias
|
161 |
+
return y
|
162 |
+
|
163 |
+
|
164 |
+
class Linear(nn.Module):
|
165 |
+
"""
|
166 |
+
Custom linear layer with support for quantized weights and optional bias.
|
167 |
+
|
168 |
+
Args:
|
169 |
+
in_features (int): Number of input features.
|
170 |
+
out_features (int): Number of output features.
|
171 |
+
bias (bool): Whether to include a bias term. Defaults to False.
|
172 |
+
dtype (optional): Data type for the layer. Defaults to `torch.bfloat16`.
|
173 |
+
"""
|
174 |
+
dtype = torch.bfloat16
|
175 |
+
|
176 |
+
def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
|
177 |
+
super().__init__()
|
178 |
+
self.in_features = in_features
|
179 |
+
self.out_features = out_features
|
180 |
+
self.weight = nn.Parameter(torch.empty(out_features, in_features, dtype=dtype or Linear.dtype))
|
181 |
+
if self.weight.element_size() == 1:
|
182 |
+
scale_out_features = (out_features + block_size - 1) // block_size
|
183 |
+
scale_in_features = (in_features + block_size - 1) // block_size
|
184 |
+
self.weight.scale = self.scale = nn.Parameter(torch.empty(scale_out_features, scale_in_features, dtype=torch.float32))
|
185 |
+
else:
|
186 |
+
self.register_parameter("scale", None)
|
187 |
+
if bias:
|
188 |
+
self.bias = nn.Parameter(torch.empty(self.part_out_features))
|
189 |
+
else:
|
190 |
+
self.register_parameter("bias", None)
|
191 |
+
|
192 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
193 |
+
"""
|
194 |
+
Forward pass for the custom linear layer.
|
195 |
+
|
196 |
+
Args:
|
197 |
+
x (torch.Tensor): Input tensor.
|
198 |
+
|
199 |
+
Returns:
|
200 |
+
torch.Tensor: Transformed tensor after linear computation.
|
201 |
+
"""
|
202 |
+
return linear(x, self.weight, self.bias)
|
203 |
+
|
204 |
+
|
205 |
+
class ColumnParallelLinear(Linear):
|
206 |
+
"""
|
207 |
+
Linear layer with column parallelism, splitting output features across distributed processes.
|
208 |
+
|
209 |
+
Args:
|
210 |
+
in_features (int): Number of input features.
|
211 |
+
out_features (int): Total number of output features.
|
212 |
+
bias (bool): Whether to include a bias term. Defaults to False.
|
213 |
+
dtype (optional): Data type for the layer. Defaults to `torch.bfloat16`.
|
214 |
+
"""
|
215 |
+
def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
|
216 |
+
assert out_features % world_size == 0
|
217 |
+
self.part_out_features = out_features // world_size
|
218 |
+
super().__init__(in_features, self.part_out_features, bias, dtype)
|
219 |
+
|
220 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
221 |
+
"""
|
222 |
+
Forward pass for column parallel linear layer.
|
223 |
+
|
224 |
+
Args:
|
225 |
+
x (torch.Tensor): Input tensor.
|
226 |
+
|
227 |
+
Returns:
|
228 |
+
torch.Tensor: Transformed tensor with column-parallel computation.
|
229 |
+
"""
|
230 |
+
y = linear(x, self.weight, self.bias)
|
231 |
+
return y
|
232 |
+
|
233 |
+
|
234 |
+
class RowParallelLinear(Linear):
|
235 |
+
"""
|
236 |
+
Linear layer with row parallelism, splitting input features across distributed processes.
|
237 |
+
|
238 |
+
Args:
|
239 |
+
in_features (int): Total number of input features.
|
240 |
+
out_features (int): Number of output features.
|
241 |
+
bias (bool): Whether to include a bias term. Defaults to False.
|
242 |
+
dtype (optional): Data type for the layer. Defaults to `torch.bfloat16`.
|
243 |
+
"""
|
244 |
+
def __init__(self, in_features: int, out_features: int, bias: bool = False, dtype = None):
|
245 |
+
assert in_features % world_size == 0
|
246 |
+
self.part_in_features = in_features // world_size
|
247 |
+
super().__init__(self.part_in_features, out_features, bias, dtype)
|
248 |
+
|
249 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
250 |
+
"""
|
251 |
+
Forward pass for row parallel linear layer.
|
252 |
+
|
253 |
+
Args:
|
254 |
+
x (torch.Tensor): Input tensor.
|
255 |
+
|
256 |
+
Returns:
|
257 |
+
torch.Tensor: Transformed tensor with row-parallel computation.
|
258 |
+
"""
|
259 |
+
y = linear(x, self.weight)
|
260 |
+
if world_size > 1:
|
261 |
+
dist.all_reduce(y)
|
262 |
+
if self.bias is not None:
|
263 |
+
y += self.bias
|
264 |
+
return y
|
265 |
+
|
266 |
+
|
267 |
+
class RMSNorm(nn.Module):
|
268 |
+
"""
|
269 |
+
Root Mean Square Layer Normalization (RMSNorm).
|
270 |
+
|
271 |
+
Args:
|
272 |
+
dim (int): Dimension of the input tensor.
|
273 |
+
eps (float): Epsilon value for numerical stability. Defaults to 1e-6.
|
274 |
+
"""
|
275 |
+
def __init__(self, dim: int, eps: float = 1e-6):
|
276 |
+
super().__init__()
|
277 |
+
self.dim = dim
|
278 |
+
self.eps = eps
|
279 |
+
self.weight = nn.Parameter(torch.ones(dim))
|
280 |
+
|
281 |
+
def forward(self, x: torch.Tensor):
|
282 |
+
"""
|
283 |
+
Forward pass for RMSNorm.
|
284 |
+
|
285 |
+
Args:
|
286 |
+
x (torch.Tensor): Input tensor.
|
287 |
+
|
288 |
+
Returns:
|
289 |
+
torch.Tensor: Normalized tensor with the same shape as input.
|
290 |
+
"""
|
291 |
+
return F.rms_norm(x, (self.dim,), self.weight, self.eps)
|
292 |
+
|
293 |
+
|
294 |
+
def precompute_freqs_cis(args: ModelArgs) -> torch.Tensor:
|
295 |
+
"""
|
296 |
+
Precomputes frequency-based complex exponential values for rotary positional embeddings.
|
297 |
+
|
298 |
+
Args:
|
299 |
+
args (ModelArgs): Model arguments containing positional embedding parameters.
|
300 |
+
|
301 |
+
Returns:
|
302 |
+
torch.Tensor: Precomputed complex exponential values for positional embeddings.
|
303 |
+
"""
|
304 |
+
dim = args.qk_rope_head_dim
|
305 |
+
seqlen = args.max_seq_len
|
306 |
+
beta_fast = args.beta_fast
|
307 |
+
beta_slow = args.beta_slow
|
308 |
+
base = args.rope_theta
|
309 |
+
factor = args.rope_factor
|
310 |
+
|
311 |
+
def find_correction_dim(num_rotations, dim, base, max_seq_len):
|
312 |
+
"""
|
313 |
+
Computes the correction dimension for a given number of rotations in the rotary positional embedding.
|
314 |
+
|
315 |
+
Args:
|
316 |
+
num_rotations (float): Number of rotations to compute the correction for.
|
317 |
+
dim (int): Dimensionality of the embedding space.
|
318 |
+
base (float): Base value for the exponential computation.
|
319 |
+
max_seq_len (int): Maximum sequence length.
|
320 |
+
|
321 |
+
Returns:
|
322 |
+
float: The correction dimension based on the input parameters.
|
323 |
+
"""
|
324 |
+
return dim * math.log(max_seq_len / (num_rotations * 2 * math.pi)) / (2 * math.log(base))
|
325 |
+
|
326 |
+
def find_correction_range(low_rot, high_rot, dim, base, max_seq_len):
|
327 |
+
"""
|
328 |
+
Computes the range of correction dimensions for rotary positional embeddings.
|
329 |
+
|
330 |
+
Args:
|
331 |
+
low_rot (float): Lower bound for the number of rotations.
|
332 |
+
high_rot (float): Upper bound for the number of rotations.
|
333 |
+
dim (int): Dimensionality of the embedding space.
|
334 |
+
base (float): Base value for the exponential computation.
|
335 |
+
max_seq_len (int): Maximum sequence length.
|
336 |
+
|
337 |
+
Returns:
|
338 |
+
Tuple[int, int]: The range of correction dimensions (low, high), clamped to valid indices.
|
339 |
+
"""
|
340 |
+
low = math.floor(find_correction_dim(low_rot, dim, base, max_seq_len))
|
341 |
+
high = math.ceil(find_correction_dim(high_rot, dim, base, max_seq_len))
|
342 |
+
return max(low, 0), min(high, dim-1)
|
343 |
+
|
344 |
+
def linear_ramp_factor(min, max, dim):
|
345 |
+
"""
|
346 |
+
Computes a linear ramp function used to smooth values between a minimum and maximum range.
|
347 |
+
|
348 |
+
Args:
|
349 |
+
min (float): Minimum value for the ramp function.
|
350 |
+
max (float): Maximum value for the ramp function.
|
351 |
+
dim (int): Dimensionality of the ramp tensor.
|
352 |
+
|
353 |
+
Returns:
|
354 |
+
torch.Tensor: A tensor of shape (dim,) with values linearly interpolated between 0 and 1,
|
355 |
+
clamped to the range [0, 1].
|
356 |
+
"""
|
357 |
+
if min == max:
|
358 |
+
max += 0.001
|
359 |
+
linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
|
360 |
+
ramp_func = torch.clamp(linear_func, 0, 1)
|
361 |
+
return ramp_func
|
362 |
+
|
363 |
+
freqs = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
|
364 |
+
if seqlen > args.original_seq_len:
|
365 |
+
low, high = find_correction_range(beta_fast, beta_slow, dim, base, args.original_seq_len)
|
366 |
+
smooth = 1 - linear_ramp_factor(low, high, dim // 2)
|
367 |
+
freqs = freqs / factor * (1 - smooth) + freqs * smooth
|
368 |
+
|
369 |
+
t = torch.arange(seqlen)
|
370 |
+
freqs = torch.outer(t, freqs)
|
371 |
+
freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
|
372 |
+
return freqs_cis
|
373 |
+
|
374 |
+
|
375 |
+
def apply_rotary_emb(x: torch.Tensor, freqs_cis: torch.Tensor) -> torch.Tensor:
|
376 |
+
"""
|
377 |
+
Applies rotary positional embeddings to the input tensor.
|
378 |
+
|
379 |
+
Args:
|
380 |
+
x (torch.Tensor): Input tensor with positional embeddings to be applied.
|
381 |
+
freqs_cis (torch.Tensor): Precomputed complex exponential values for positional embeddings.
|
382 |
+
|
383 |
+
Returns:
|
384 |
+
torch.Tensor: Tensor with rotary embeddings applied.
|
385 |
+
"""
|
386 |
+
dtype = x.dtype
|
387 |
+
x = torch.view_as_complex(x.float().view(*x.shape[:-1], -1, 2))
|
388 |
+
freqs_cis = freqs_cis.view(1, x.size(1), 1, x.size(-1))
|
389 |
+
y = torch.view_as_real(x * freqs_cis).flatten(3)
|
390 |
+
return y.to(dtype)
|
391 |
+
|
392 |
+
|
393 |
+
class MLA(nn.Module):
|
394 |
+
"""
|
395 |
+
Multi-Headed Attention Layer (MLA).
|
396 |
+
|
397 |
+
Attributes:
|
398 |
+
dim (int): Dimensionality of the input features.
|
399 |
+
n_heads (int): Number of attention heads.
|
400 |
+
n_local_heads (int): Number of local attention heads for distributed systems.
|
401 |
+
q_lora_rank (int): Rank for low-rank query projection.
|
402 |
+
kv_lora_rank (int): Rank for low-rank key/value projection.
|
403 |
+
qk_nope_head_dim (int): Dimensionality of non-positional query/key projections.
|
404 |
+
qk_rope_head_dim (int): Dimensionality of rotary-positional query/key projections.
|
405 |
+
qk_head_dim (int): Total dimensionality of query/key projections.
|
406 |
+
v_head_dim (int): Dimensionality of value projections.
|
407 |
+
softmax_scale (float): Scaling factor for softmax in attention computation.
|
408 |
+
"""
|
409 |
+
def __init__(self, args: ModelArgs):
|
410 |
+
super().__init__()
|
411 |
+
self.dim = args.dim
|
412 |
+
self.n_heads = args.n_heads
|
413 |
+
self.n_local_heads = args.n_heads // world_size
|
414 |
+
self.q_lora_rank = args.q_lora_rank
|
415 |
+
self.kv_lora_rank = args.kv_lora_rank
|
416 |
+
self.qk_nope_head_dim = args.qk_nope_head_dim
|
417 |
+
self.qk_rope_head_dim = args.qk_rope_head_dim
|
418 |
+
self.qk_head_dim = args.qk_nope_head_dim + args.qk_rope_head_dim
|
419 |
+
self.v_head_dim = args.v_head_dim
|
420 |
+
|
421 |
+
if self.q_lora_rank == 0:
|
422 |
+
self.wq = ColumnParallelLinear(self.dim, self.n_heads * self.qk_head_dim)
|
423 |
+
else:
|
424 |
+
self.wq_a = Linear(self.dim, self.q_lora_rank)
|
425 |
+
self.q_norm = RMSNorm(self.q_lora_rank)
|
426 |
+
self.wq_b = ColumnParallelLinear(self.q_lora_rank, self.n_heads * self.qk_head_dim)
|
427 |
+
self.wkv_a = Linear(self.dim, self.kv_lora_rank + self.qk_rope_head_dim)
|
428 |
+
self.kv_norm = RMSNorm(self.kv_lora_rank)
|
429 |
+
self.wkv_b = ColumnParallelLinear(self.kv_lora_rank, self.n_heads * (self.qk_nope_head_dim + self.v_head_dim))
|
430 |
+
self.wo = RowParallelLinear(self.n_heads * self.v_head_dim, self.dim)
|
431 |
+
self.softmax_scale = self.qk_head_dim ** -0.5
|
432 |
+
if args.max_seq_len > args.original_seq_len:
|
433 |
+
mscale = 0.1 * args.mscale * math.log(args.rope_factor) + 1.0
|
434 |
+
self.softmax_scale = self.softmax_scale * mscale * mscale
|
435 |
+
|
436 |
+
if attn_impl == "naive":
|
437 |
+
self.register_buffer("k_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.qk_head_dim), persistent=False)
|
438 |
+
self.register_buffer("v_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.v_head_dim), persistent=False)
|
439 |
+
else:
|
440 |
+
self.register_buffer("kv_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.kv_lora_rank), persistent=False)
|
441 |
+
self.register_buffer("pe_cache", torch.zeros(args.max_batch_size, args.max_seq_len, self.qk_rope_head_dim), persistent=False)
|
442 |
+
|
443 |
+
def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]):
|
444 |
+
"""
|
445 |
+
Forward pass for the Multi-Headed Attention Layer (MLA).
|
446 |
+
|
447 |
+
Args:
|
448 |
+
x (torch.Tensor): Input tensor of shape (batch_size, seq_len, dim).
|
449 |
+
start_pos (int): Starting position in the sequence for caching.
|
450 |
+
freqs_cis (torch.Tensor): Precomputed complex exponential values for rotary embeddings.
|
451 |
+
mask (Optional[torch.Tensor]): Mask tensor to exclude certain positions from attention.
|
452 |
+
|
453 |
+
Returns:
|
454 |
+
torch.Tensor: Output tensor with the same shape as the input.
|
455 |
+
"""
|
456 |
+
bsz, seqlen, _ = x.size()
|
457 |
+
end_pos = start_pos + seqlen
|
458 |
+
if self.q_lora_rank == 0:
|
459 |
+
q = self.wq(x)
|
460 |
+
else:
|
461 |
+
q = self.wq_b(self.q_norm(self.wq_a(x)))
|
462 |
+
q = q.view(bsz, seqlen, self.n_local_heads, self.qk_head_dim)
|
463 |
+
q_nope, q_pe = torch.split(q, [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
|
464 |
+
q_pe = apply_rotary_emb(q_pe, freqs_cis)
|
465 |
+
kv = self.wkv_a(x)
|
466 |
+
kv, k_pe = torch.split(kv, [self.kv_lora_rank, self.qk_rope_head_dim], dim=-1)
|
467 |
+
k_pe = apply_rotary_emb(k_pe.unsqueeze(2), freqs_cis)
|
468 |
+
if attn_impl == "naive":
|
469 |
+
q = torch.cat([q_nope, q_pe], dim=-1)
|
470 |
+
kv = self.wkv_b(self.kv_norm(kv))
|
471 |
+
kv = kv.view(bsz, seqlen, self.n_local_heads, self.qk_nope_head_dim + self.v_head_dim)
|
472 |
+
k_nope, v = torch.split(kv, [self.qk_nope_head_dim, self.v_head_dim], dim=-1)
|
473 |
+
k = torch.cat([k_nope, k_pe.expand(-1, -1, self.n_local_heads, -1)], dim=-1)
|
474 |
+
self.k_cache[:bsz, start_pos:end_pos] = k
|
475 |
+
self.v_cache[:bsz, start_pos:end_pos] = v
|
476 |
+
scores = torch.einsum("bshd,bthd->bsht", q, self.k_cache[:bsz, :end_pos]) * self.softmax_scale
|
477 |
+
else:
|
478 |
+
            wkv_b = self.wkv_b.weight if self.wkv_b.scale is None else weight_dequant(self.wkv_b.weight, self.wkv_b.scale, block_size)
            wkv_b = wkv_b.view(self.n_local_heads, -1, self.kv_lora_rank)
            q_nope = torch.einsum("bshd,hdc->bshc", q_nope, wkv_b[:, :self.qk_nope_head_dim])
            self.kv_cache[:bsz, start_pos:end_pos] = self.kv_norm(kv)
            self.pe_cache[:bsz, start_pos:end_pos] = k_pe.squeeze(2)
            scores = (torch.einsum("bshc,btc->bsht", q_nope, self.kv_cache[:bsz, :end_pos]) +
                      torch.einsum("bshr,btr->bsht", q_pe, self.pe_cache[:bsz, :end_pos])) * self.softmax_scale
        if mask is not None:
            scores += mask.unsqueeze(1)
        scores = scores.softmax(dim=-1, dtype=torch.float32).type_as(x)
        if attn_impl == "naive":
            x = torch.einsum("bsht,bthd->bshd", scores, self.v_cache[:bsz, :end_pos])
        else:
            x = torch.einsum("bsht,btc->bshc", scores, self.kv_cache[:bsz, :end_pos])
            x = torch.einsum("bshc,hdc->bshd", x, wkv_b[:, -self.v_head_dim:])
        x = self.wo(x.flatten(2))
        return x


class MLP(nn.Module):
    """
    Multi-Layer Perceptron (MLP) used as a feed-forward layer.

    Attributes:
        w1 (nn.Module): Linear layer for input-to-hidden transformation.
        w2 (nn.Module): Linear layer for hidden-to-output transformation.
        w3 (nn.Module): Additional linear layer for feature transformation.
    """
    def __init__(self, dim: int, inter_dim: int):
        """
        Initializes the MLP layer.

        Args:
            dim (int): Input and output dimensionality.
            inter_dim (int): Hidden layer dimensionality.
        """
        super().__init__()
        self.w1 = ColumnParallelLinear(dim, inter_dim)
        self.w2 = RowParallelLinear(inter_dim, dim)
        self.w3 = ColumnParallelLinear(dim, inter_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for the MLP layer.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Output tensor after MLP computation.
        """
        return self.w2(F.silu(self.w1(x)) * self.w3(x))


class Gate(nn.Module):
    """
    Gating mechanism for routing inputs in a mixture-of-experts (MoE) model.

    Attributes:
        dim (int): Dimensionality of input features.
        topk (int): Number of top experts activated for each input.
        n_groups (int): Number of groups for routing.
        topk_groups (int): Number of groups to route inputs to.
        score_func (str): Scoring function ('softmax' or 'sigmoid').
        route_scale (float): Scaling factor for routing weights.
        weight (torch.nn.Parameter): Learnable weights for the gate.
        bias (Optional[torch.nn.Parameter]): Optional bias term for the gate.
    """
    def __init__(self, args: ModelArgs):
        """
        Initializes the Gate module.

        Args:
            args (ModelArgs): Model arguments containing gating parameters.
        """
        super().__init__()
        self.dim = args.dim
        self.topk = args.n_activated_experts
        self.n_groups = args.n_expert_groups
        self.topk_groups = args.n_limited_groups
        self.score_func = args.score_func
        self.route_scale = args.route_scale
        self.weight = nn.Parameter(torch.empty(args.n_routed_experts, args.dim))
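        # A routing bias is only created for the dim == 7168 (671B) configuration;
        # it shifts which experts are selected below, while the returned routing
        # weights still come from the unbiased scores.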
        self.bias = nn.Parameter(torch.empty(args.n_routed_experts)) if self.dim == 7168 else None

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Forward pass for the gating mechanism.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: Routing weights and selected expert indices.
        """
        scores = linear(x, self.weight)
        if self.score_func == "softmax":
            scores = scores.softmax(dim=-1, dtype=torch.float32)
        else:
            scores = scores.sigmoid()
        original_scores = scores
        if self.bias is not None:
            scores = scores + self.bias
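        # Group-limited routing: scores are viewed as (tokens, n_groups, experts per
        # group), the best topk_groups groups are kept, and the remaining groups are
        # masked out before the final per-token top-k expert selection.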
        if self.n_groups > 1:
            scores = scores.view(x.size(0), self.n_groups, -1)
            if self.bias is None:
                group_scores = scores.amax(dim=-1)
            else:
                group_scores = scores.topk(2, dim=-1)[0].sum(dim=-1)
            indices = group_scores.topk(self.topk_groups, dim=-1)[1]
            mask = torch.zeros_like(scores[..., 0]).scatter_(1, indices, True)
            scores = (scores * mask.unsqueeze(-1)).flatten(1)
        indices = torch.topk(scores, self.topk, dim=-1)[1]
        weights = original_scores.gather(1, indices)
        if self.score_func == "sigmoid":
            weights /= weights.sum(dim=-1, keepdim=True)
        weights *= self.route_scale
        return weights.type_as(x), indices


class Expert(nn.Module):
    """
    Expert layer for Mixture-of-Experts (MoE) models.

    Attributes:
        w1 (nn.Module): Linear layer for input-to-hidden transformation.
        w2 (nn.Module): Linear layer for hidden-to-output transformation.
        w3 (nn.Module): Additional linear layer for feature transformation.
    """
    def __init__(self, dim: int, inter_dim: int):
        """
        Initializes the Expert layer.

        Args:
            dim (int): Input and output dimensionality.
            inter_dim (int): Hidden layer dimensionality.
        """
        super().__init__()
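        # Experts are assigned whole to ranks (see MoE below), so plain Linear layers
        # are used here rather than the tensor-parallel layers used in MLP.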
        self.w1 = Linear(dim, inter_dim)
        self.w2 = Linear(inter_dim, dim)
        self.w3 = Linear(dim, inter_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for the Expert layer.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Output tensor after expert computation.
        """
        return self.w2(F.silu(self.w1(x)) * self.w3(x))


class MoE(nn.Module):
    """
    Mixture-of-Experts (MoE) module.

    Attributes:
        dim (int): Dimensionality of input features.
        n_routed_experts (int): Total number of experts in the model.
        n_local_experts (int): Number of experts handled locally in distributed systems.
        n_activated_experts (int): Number of experts activated for each input.
        gate (nn.Module): Gating mechanism to route inputs to experts.
        experts (nn.ModuleList): List of expert modules.
        shared_experts (nn.Module): Shared experts applied to all inputs.
    """
    def __init__(self, args: ModelArgs):
        """
        Initializes the MoE module.

        Args:
            args (ModelArgs): Model arguments containing MoE parameters.
        """
        super().__init__()
        self.dim = args.dim
        assert args.n_routed_experts % world_size == 0
        self.n_routed_experts = args.n_routed_experts
        self.n_local_experts = args.n_routed_experts // world_size
        self.n_activated_experts = args.n_activated_experts
        self.experts_start_idx = rank * self.n_local_experts
        self.experts_end_idx = self.experts_start_idx + self.n_local_experts
        self.gate = Gate(args)
        self.experts = nn.ModuleList([Expert(args.dim, args.moe_inter_dim) if self.experts_start_idx <= i < self.experts_end_idx else None
                                      for i in range(self.n_routed_experts)])
        self.shared_experts = MLP(args.dim, args.n_shared_experts * args.moe_inter_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass for the MoE module.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Output tensor after expert routing and computation.
        """
        shape = x.size()
        x = x.view(-1, self.dim)
        weights, indices = self.gate(x)
        y = torch.zeros_like(x)
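        # Count how many tokens were routed to each expert, evaluate only the experts
        # owned by this rank, and sum the partial outputs across ranks with
        # all_reduce; the shared experts run on every token locally.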
        counts = torch.bincount(indices.flatten(), minlength=self.n_routed_experts).tolist()
        for i in range(self.experts_start_idx, self.experts_end_idx):
            if counts[i] == 0:
                continue
            expert = self.experts[i]
            idx, top = torch.where(indices == i)
            y[idx] += expert(x[idx]) * weights[idx, top, None]
        z = self.shared_experts(x)
        if world_size > 1:
            dist.all_reduce(y)
        return (y + z).view(shape)


class Block(nn.Module):
    """
    Transformer block combining attention and feed-forward layers.

    Attributes:
        attn (nn.Module): Attention layer (MLA).
        ffn (nn.Module): Feed-forward network (MLP or MoE).
        attn_norm (nn.Module): Layer normalization for attention.
        ffn_norm (nn.Module): Layer normalization for feed-forward network.
    """
    def __init__(self, layer_id: int, args: ModelArgs):
        """
        Initializes the Transformer block.

        Args:
            layer_id (int): Layer index in the transformer.
            args (ModelArgs): Model arguments containing block parameters.
        """
        super().__init__()
        self.attn = MLA(args)
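        # The first n_dense_layers blocks use a dense MLP; later blocks use MoE.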
        self.ffn = MLP(args.dim, args.inter_dim) if layer_id < args.n_dense_layers else MoE(args)
        self.attn_norm = RMSNorm(args.dim)
        self.ffn_norm = RMSNorm(args.dim)

    def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]) -> torch.Tensor:
        """
        Forward pass for the Transformer block.

        Args:
            x (torch.Tensor): Input tensor.
            start_pos (int): Starting position in the sequence.
            freqs_cis (torch.Tensor): Precomputed complex exponential values for rotary embeddings.
            mask (Optional[torch.Tensor]): Mask tensor to exclude certain positions from attention.

        Returns:
            torch.Tensor: Output tensor after block computation.
        """
        x = x + self.attn(self.attn_norm(x), start_pos, freqs_cis, mask)
        x = x + self.ffn(self.ffn_norm(x))
        return x


class Transformer(nn.Module):
    """
    Transformer model with positional embeddings, multiple layers, and output projection.

    Attributes:
        max_seq_len (int): Maximum sequence length for the transformer.
        embed (nn.Module): Embedding layer for input tokens.
        layers (torch.nn.ModuleList): List of transformer blocks.
        norm (nn.Module): Layer normalization applied after all blocks.
        head (nn.Module): Output projection layer mapping to vocabulary size.
        freqs_cis (torch.Tensor): Precomputed complex exponential values for rotary embeddings.
    """
    def __init__(self, args: ModelArgs):
        """
        Initializes the Transformer model.

        Args:
            args (ModelArgs): Model arguments containing transformer parameters.
        """
        global world_size, rank
        world_size = dist.get_world_size() if dist.is_initialized() else 1
        rank = dist.get_rank() if dist.is_initialized() else 0
        Linear.dtype = torch.float8_e4m3fn if args.dtype == "fp8" else torch.bfloat16
        super().__init__()
        self.max_seq_len = args.max_seq_len
        self.embed = ParallelEmbedding(args.vocab_size, args.dim)
        self.layers = torch.nn.ModuleList()
        for layer_id in range(args.n_layers):
            self.layers.append(Block(layer_id, args))
        self.norm = RMSNorm(args.dim)
        self.head = ColumnParallelLinear(args.dim, args.vocab_size, dtype=torch.get_default_dtype())
        self.register_buffer("freqs_cis", precompute_freqs_cis(args), persistent=False)

    @torch.inference_mode()
    def forward(self, tokens: torch.Tensor, start_pos: int = 0):
        """
        Forward pass for the Transformer model.

        Args:
            tokens (torch.Tensor): Input tensor of token IDs with shape (batch_size, seq_len).
            start_pos (int, optional): Starting position in the sequence for rotary embeddings. Defaults to 0.

        Returns:
            torch.Tensor: Logits tensor of shape (batch_size, vocab_size).
        """
        seqlen = tokens.size(1)
        h = self.embed(tokens)
        freqs_cis = self.freqs_cis[start_pos:start_pos+seqlen]
        mask = None
        if seqlen > 1:
            mask = torch.full((seqlen, seqlen), float("-inf"), device=tokens.device).triu_(1)
        for layer in self.layers:
            h = layer(h, start_pos, freqs_cis, mask)
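        # Only the last position is projected to logits (the decoding case); with the
        # vocab-parallel head, each rank's logit shard is all-gathered and
        # concatenated along the vocabulary dimension.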
        h = self.norm(h)[:, -1]
        logits = self.head(h)
        if world_size > 1:
            all_logits = [torch.empty_like(logits) for _ in range(world_size)]
            dist.all_gather(all_logits, logits)
            logits = torch.cat(all_logits, dim=-1)
        return logits


if __name__ == "__main__":
    torch.set_default_dtype(torch.bfloat16)
    torch.set_default_device("cuda")
    torch.manual_seed(0)
    args = ModelArgs()
    x = torch.randint(0, args.vocab_size, (2, 128))
    model = Transformer(args)
    print(model(x).size())
requirements.txt
CHANGED
@@ -1 +1,5 @@
-
+torch==2.4.1
+triton==3.0.0
+transformers==4.46.3
+safetensors==0.4.5
+huggingface_hub==0.25.2
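
Usage note (not part of the uploaded files): the snippet below is a minimal, hypothetical sketch of incremental greedy decoding on top of Transformer.forward above, assuming a single GPU, default ModelArgs, and already-tokenized input. The uploaded generate.py is the authoritative generation script; the greedy_decode name is illustrative only.

import torch

from model import ModelArgs, Transformer


@torch.inference_mode()
def greedy_decode(model: Transformer, prompt: torch.Tensor, max_new_tokens: int) -> torch.Tensor:
    # Hypothetical helper: run the prompt once, then feed one token at a time,
    # advancing start_pos so MLA's KV caches are reused between steps.
    tokens = prompt
    logits = model(tokens, start_pos=0)                      # (batch, vocab), last position only
    for _ in range(max_new_tokens):
        next_token = logits.argmax(dim=-1, keepdim=True)     # (batch, 1)
        logits = model(next_token, start_pos=tokens.size(1)) # caches already hold the prompt
        tokens = torch.cat([tokens, next_token], dim=1)
    return tokens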