attention_logit_softcapping: null
attention_scores_scalar: null
attn_bias: false
bias: false
block_size: 2048
final_logit_softcapping: null
gelu_approximate: none
head_size: 64
hf_config:
  name: MicroLlama
  org: keeeeenw
intermediate_size: 5632
lm_head_bias: false
mlp_class_name: LLaMAMLP
n_embd: 1024
n_expert: 0
n_expert_per_token: 0
n_head: 16
n_layer: 12
n_query_groups: 4
name: micro-llama-300M
norm_class_name: RMSNorm
norm_eps: 1.0e-05
padded_vocab_size: 32000
padding_multiple: 64
parallel_residual: false
post_attention_norm: false
post_mlp_norm: false
rope_adjustments: null
rope_base: 10000
rope_condense_ratio: 1
rotary_percentage: 1.0
scale_embeddings: false
shared_attention_norm: false
sliding_window_layer_placing: null
sliding_window_size: null
vocab_size: 32000
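The derived quantities in this configuration can be sanity-checked programmatically. The sketch below is a minimal example, assuming the config above has been saved to a local file named `micro-llama-300M.yaml`; the filename and the consistency checks are illustrative and not part of the config itself.

```python
import yaml  # PyYAML

# Load the configuration, assuming it was saved as micro-llama-300M.yaml.
with open("micro-llama-300M.yaml") as f:
    cfg = yaml.safe_load(f)

# head_size is the per-head width: n_embd split evenly across n_head heads.
assert cfg["head_size"] == cfg["n_embd"] // cfg["n_head"]        # 1024 / 16 = 64

# Grouped-query attention: 16 query heads share 4 key/value groups.
assert cfg["n_head"] % cfg["n_query_groups"] == 0                # 16 / 4 = 4 queries per group

# The vocabulary is already a multiple of padding_multiple, so no padding is added.
assert cfg["padded_vocab_size"] % cfg["padding_multiple"] == 0   # 32000 % 64 == 0
assert cfg["padded_vocab_size"] >= cfg["vocab_size"]

print("config is internally consistent")
```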