Text Generation
Transformers
Safetensors
qwen3_moe
programming
code generation
code
codeqwen
Mixture of Experts
coding
coder
qwen2
chat
qwen
qwen-coder
Qwen3-Coder-30B-A3B-Instruct
Qwen3-30B-A3B
mixture of experts
128 experts
8 active experts
1000k context
1 million context
qwen3
finetune
brainstorm 20x
brainstorm
optional thinking
conversational
Update config.json
Browse files — config.json (+2 −2)
config.json
CHANGED
@@ -11,7 +11,7 @@
|
|
11 |
"hidden_size": 2048,
|
12 |
"initializer_range": 0.02,
|
13 |
"intermediate_size": 5472,
|
14 |
-
"max_position_embeddings":
|
15 |
"max_window_layers": 28,
|
16 |
"mlp_only_layers": [],
|
17 |
"model_type": "qwen3_moe",
|
@@ -27,7 +27,7 @@
|
|
27 |
"rms_norm_eps": 1e-06,
|
28 |
"rope_scaling": {
|
29 |
"type": "yarn",
|
30 |
-
"factor":
|
31 |
"original_max_position_embeddings": 262144
|
32 |
},
|
33 |
"rope_theta": 10000000,
|
|
|
11 |
"hidden_size": 2048,
|
12 |
"initializer_range": 0.02,
|
13 |
"intermediate_size": 5472,
|
14 |
+
"max_position_embeddings": 1048576,
|
15 |
"max_window_layers": 28,
|
16 |
"mlp_only_layers": [],
|
17 |
"model_type": "qwen3_moe",
|
|
|
27 |
"rms_norm_eps": 1e-06,
|
28 |
"rope_scaling": {
|
29 |
"type": "yarn",
|
30 |
+
"factor": 4.0,
|
31 |
"original_max_position_embeddings": 262144
|
32 |
},
|
33 |
"rope_theta": 10000000,
|