unsloth_finetune2 / config.json
{
  "architectures": [
    "MllamaForConditionalGeneration"
  ],
  "image_token_index": 128256,
  "model_type": "mllama",
  "pad_token_id": 128004,
  "text_config": {
    "cross_attention_layers": [
      3,
      8,
      13,
      18,
      23,
      28,
      33,
      38
    ],
    "dropout": 0,
    "eos_token_id": [
      128001,
      128008,
      128009
    ],
    "hidden_act": "silu",
    "hidden_size": 4096,
    "initializer_range": 0.02,
    "intermediate_size": 14336,
    "max_position_embeddings": 131072,
    "model_type": "mllama_text_model",
    "num_attention_heads": 32,
    "num_hidden_layers": 40,
    "num_key_value_heads": 8,
    "rms_norm_eps": 1e-05,
    "rope_scaling": {
      "factor": 8.0,
      "high_freq_factor": 4.0,
      "low_freq_factor": 1.0,
      "original_max_position_embeddings": 8192,
      "rope_type": "llama3"
    },
    "rope_theta": 500000.0,
    "torch_dtype": "float16",
    "use_cache": true,
    "vocab_size": 128256
  },
  "torch_dtype": "float16",
  "transformers_version": "4.50.0.dev0",
  "unsloth_fixed": true,
  "unsloth_version": "2025.3.10",
  "vision_config": {
    "attention_heads": 16,
    "hidden_act": "gelu",
    "hidden_size": 1280,
    "image_size": 560,
    "initializer_range": 0.02,
    "intermediate_layers_indices": [
      3,
      7,
      15,
      23,
      30
    ],
    "intermediate_size": 5120,
    "max_num_tiles": 4,
    "model_type": "mllama_vision_model",
    "norm_eps": 1e-05,
    "num_channels": 3,
    "num_global_layers": 8,
    "num_hidden_layers": 32,
    "patch_size": 14,
    "supported_aspect_ratios": [
      [1, 1],
      [1, 2],
      [1, 3],
      [1, 4],
      [2, 1],
      [2, 2],
      [3, 1],
      [4, 1]
    ],
    "torch_dtype": "float16",
    "vision_output_dim": 7680
  }
}
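
Not part of the file itself, but as a quick sanity check the config can be loaded and inspected with transformers (4.45 or later, where mllama support landed). The sketch below assumes this config.json sits in the current working directory; every printed number is read or derived directly from the fields above.

# Minimal sketch: load and inspect this mllama config.
# Assumes transformers >= 4.45 and config.json in the current directory.
from transformers import MllamaConfig

config = MllamaConfig.from_pretrained(".")

text = config.text_config
vision = config.vision_config

# 40 decoder layers; every fifth layer starting at 3 cross-attends to vision.
print(text.num_hidden_layers)       # 40
print(text.cross_attention_layers)  # [3, 8, 13, 18, 23, 28, 33, 38]

# llama3-style RoPE scaling rescales low-frequency rotary components
# (factor 8.0) to stretch the 8192-token pretraining window to 131072.
print(text.rope_scaling["rope_type"], text.max_position_embeddings)  # llama3 131072

# Each 560x560 tile is cut into (560 / 14)**2 = 1600 patches
# (before special tokens), with up to 4 tiles per image.
patches_per_tile = (vision.image_size // vision.patch_size) ** 2
print(patches_per_tile, vision.max_num_tiles)  # 1600 4

# vision_output_dim = hidden_size * (len(intermediate_layers_indices) + 1):
# the final hidden state is concatenated with 5 intermediate layer outputs.
print(vision.hidden_size * (len(vision.intermediate_layers_indices) + 1))  # 7680

# The image placeholder token id sits one past the 128256-entry text vocab.
print(config.image_token_index, text.vocab_size)  # 128256 128256

The text-tower dimensions (hidden_size 4096, 40 layers, 8 cross-attention layers) and the six-way vision feature concatenation match the stock Llama 3.2 11B Vision layout, so this appears to be a standard Unsloth re-export of that architecture rather than an architectural change.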