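"""Convert a custom Qwen3SharedMoe checkpoint into a stock Qwen3Moe checkpoint.

Takes two CLI arguments: the input model directory and the output directory.
"""
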
import glob
import re
import shutil
import sys

import accelerate
import torch
from safetensors import safe_open
from transformers.models.qwen3_moe.configuration_qwen3_moe import Qwen3MoeConfig
from transformers.models.qwen3_moe.modeling_qwen3_moe import Qwen3MoeForCausalLM

from configuration_qwen3_shared_moe import Qwen3SharedMoeConfig

input_model = sys.argv[1]
output_model_path = sys.argv[2]
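
# Load the shared-MoE config and mirror its fields into a stock Qwen3MoeConfig.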
cfg_shared_moe = Qwen3SharedMoeConfig.from_pretrained(input_model)
cfg_standard_moe = Qwen3MoeConfig(
    vocab_size=cfg_shared_moe.vocab_size,
    hidden_size=cfg_shared_moe.hidden_size,
    intermediate_size=cfg_shared_moe.intermediate_size,
    num_hidden_layers=cfg_shared_moe.num_hidden_layers,
    num_attention_heads=cfg_shared_moe.num_attention_heads,
    num_key_value_heads=cfg_shared_moe.num_key_value_heads,
    hidden_act=cfg_shared_moe.hidden_act,
    max_position_embeddings=cfg_shared_moe.max_position_embeddings,
    initializer_range=cfg_shared_moe.initializer_range,
    rms_norm_eps=cfg_shared_moe.rms_norm_eps,
    use_cache=cfg_shared_moe.use_cache,
    tie_word_embeddings=cfg_shared_moe.tie_word_embeddings,
    rope_theta=cfg_shared_moe.rope_theta,
    rope_scaling=cfg_shared_moe.rope_scaling,
    attention_bias=cfg_shared_moe.attention_bias,
    use_sliding_window=cfg_shared_moe.use_sliding_window,
    sliding_window=cfg_shared_moe.sliding_window,
    max_window_layers=cfg_shared_moe.max_window_layers,
    attention_dropout=cfg_shared_moe.attention_dropout,
    decoder_sparse_step=cfg_shared_moe.decoder_sparse_step,
    moe_intermediate_size=cfg_shared_moe.moe_intermediate_size,
    num_experts_per_tok=cfg_shared_moe.num_experts_per_tok,
    num_experts=cfg_shared_moe.num_experts,
    norm_topk_prob=cfg_shared_moe.norm_topk_prob,
    output_router_logits=cfg_shared_moe.output_router_logits,
    router_aux_loss_coef=cfg_shared_moe.router_aux_loss_coef,
    mlp_only_layers=cfg_shared_moe.mlp_only_layers,
    head_dim=cfg_shared_moe.head_dim,
)
num_experts = cfg_standard_moe.num_experts
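
# Build the target model on the meta device so no weight memory is allocated;
# real tensors are attached later via load_state_dict(..., assign=True).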
with accelerate.init_empty_weights():
    model_standard_moe = Qwen3MoeForCausalLM(cfg_standard_moe)
model_standard_moe = model_standard_moe.to(torch.bfloat16)
new_state_dict = {}
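
# Locate the sharded safetensors files of the input checkpoint.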
pattern = f"{input_model}/model-*-of-*.safetensors"
files = sorted(glob.glob(pattern))
if not files:
    raise FileNotFoundError(f"no safetensors shards found matching {pattern}")
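
# Load every tensor from every shard into CPU memory.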
tensors = {}
for file_path in files:
    print(f"processing {file_path}")
    with safe_open(file_path, framework="pt", device="cpu") as f:
        for key in f.keys():
            tensors[key] = f.get_tensor(key)
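
# Remap the fused expert tensors into the per-expert weights that
# Qwen3MoeForCausalLM expects; everything outside `moe_mlp` copies through
# unchanged.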
for key in tensors:
    if "moe_mlp" not in key:
        new_state_dict[key] = tensors[key]
    elif "moe_mlp.output_experts" in key:
        # Stacked down projections: dim 0 indexes the expert.
        layer_num = int(re.search(r"\d+", key).group())
        for i, tensor in enumerate(torch.unbind(tensors[key])):
            new_state_dict[
                f"model.layers.{layer_num}.mlp.experts.{i}.down_proj.weight"
            ] = tensor.contiguous()
    elif "moe_mlp.experts" in key:
        # Stacked fused projections: dim 0 indexes the expert; each expert's
        # weight holds up_proj and gate_proj concatenated along dim 0.
        layer_num = int(re.search(r"\d+", key).group())
        for i, tensor in enumerate(torch.unbind(tensors[key])):
            (
                new_state_dict[
                    f"model.layers.{layer_num}.mlp.experts.{i}.up_proj.weight"
                ],
                new_state_dict[
                    f"model.layers.{layer_num}.mlp.experts.{i}.gate_proj.weight"
                ],
            ) = torch.chunk(tensor, 2, dim=0)
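
# Attach the converted tensors to the meta-initialized model, then write the
# weights and the standard config to the output directory.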
model_standard_moe.load_state_dict(new_state_dict, strict=True, assign=True)
model_standard_moe.save_pretrained(output_model_path)
cfg_standard_moe.save_pretrained(output_model_path)
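
# Copy the tokenizer files across unchanged.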
for i in ["merges.txt", "tokenizer_config.json", "tokenizer.json", "vocab.json"]:
shutil.copy(input_model + "/" + i, output_model_path + "/" + i)