DeepseekVL2-4bit / config.json
{
"_name_or_path": "deepseek-ai/deepseek-vl2-tiny",
"architectures": [
"DeepseekVLV2ForCausalLM"
],
"candidate_resolutions": [
[
384,
384
],
[
384,
768
],
[
768,
384
],
[
384,
1152
],
[
1152,
384
],
[
384,
1536
],
[
1536,
384
],
[
768,
768
],
[
384,
1920
],
[
1920,
384
],
[
384,
2304
],
[
2304,
384
],
[
768,
1152
],
[
1152,
768
],
[
384,
2688
],
[
2688,
384
],
[
384,
3072
],
[
3072,
384
],
[
768,
1536
],
[
1536,
768
],
[
384,
3456
],
[
3456,
384
],
[
1152,
1152
]
],
"global_view_pos": "head",
"language_config": {
"_attn_implementation_autoset": true,
"architectures": [
"DeepseekV2ForCausalLM"
],
"auto_map": {
"AutoConfig": "configuration_deepseek.DeepseekV2Config",
"AutoModel": "modeling_deepseek.DeepseekV2Model",
"AutoModelForCausalLM": "modeling_deepseek.DeepseekV2ForCausalLM"
},
"bos_token_id": 0,
"eos_token_id": 1,
"first_k_dense_replace": 1,
"hidden_size": 1280,
"intermediate_size": 6848,
"kv_lora_rank": null,
"lm_head": true,
"max_position_embeddings": 4096,
"model_type": "deepseek_v2",
"moe_intermediate_size": 896,
"n_group": 1,
"n_routed_experts": 64,
"n_shared_experts": 2,
"num_attention_heads": 10,
"num_experts_per_tok": 6,
"num_hidden_layers": 12,
"num_key_value_heads": 10,
"q_lora_rank": null,
"qk_nope_head_dim": 0,
"qk_rope_head_dim": 0,
"rm_head": false,
"topk_group": 1,
"topk_method": "greedy",
"torch_dtype": "bfloat16",
"use_mla": false,
"v_head_dim": 0,
"vocab_size": 129280
},
"model_type": "deepseek_vl_v2",
"projector_config": {
"model_type": "mlp_projector",
"n_embed": 1280
},
"quantization_config": {
"amp": true,
"autoround_version": "0.4.5",
"batch_size": 8,
"bits": 4,
"data_type": "int",
"dataset": "NeelNanda/pile-10k",
"enable_minmax_tuning": true,
"enable_norm_bias_tuning": false,
"enable_quanted_input": true,
"gradient_accumulate_steps": 1,
"group_size": 32,
"iters": 200,
"low_gpu_mem_usage": false,
"lr": 0.005,
"minmax_lr": 0.005,
"modules_to_not_convert": [
"vision.blocks",
"projector.layers",
"vision.attn_pool.q",
"vision.attn_pool.kv",
"vision.attn_pool.proj",
"vision.attn_pool.mlp.fc1",
"vision.attn_pool.mlp.fc2",
"language.lm_head"
],
"nsamples": 128,
"quant_method": "awq",
"scale_dtype": "torch.float16",
"seqlen": 2048,
"sym": true,
"to_quant_block_names": "language.model.layers",
"version": "gemm",
"zero_point": false
},
"tile_tag": "2D",
"torch_dtype": "float16",
"transformers_version": "4.47.1",
"vision_config": {
"layers": 27,
"mlp_ratio": 3.7362,
"model_name": "siglip_so400m_patch14_384",
"model_type": "vision",
"patch_size": 14,
"width": 1152
}
}
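
The "quantization_config" above declares a 4-bit, group-size-32, symmetric weight quantization produced by AutoRound 0.4.5 and exported in AWQ GEMM format, applied only to the blocks named in "to_quant_block_names" ("language.model.layers"); the vision tower, projector, and lm_head listed under "modules_to_not_convert" stay unquantized. A minimal loading sketch under stated assumptions: the repo id below is hypothetical, the checkpoint ships the remote code referenced by "auto_map", and an AWQ-compatible kernel backend (e.g. autoawq) is installed. This is not the official DeepSeek-VL2 loading recipe, just a generic transformers pattern.

    # Sketch: load this 4-bit AWQ-format checkpoint with transformers.
    import torch
    from transformers import AutoModelForCausalLM

    model_id = "DeepseekVL2-4bit"  # hypothetical hub id or local path

    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        trust_remote_code=True,      # deepseek_vl_v2 is a custom architecture
        torch_dtype=torch.float16,   # matches the top-level "torch_dtype"
        device_map="auto",           # shard/offload across available devices
    )

    # Quick sanity check against the quantization settings in this config.
    q = model.config.quantization_config
    print(q["bits"], q["group_size"], q["quant_method"])  # expect: 4 32 awq

Keeping the SigLIP vision encoder and MLP projector in higher precision while quantizing only the MoE language layers is a common choice for vision-language checkpoints, since the vision path is small relative to the 64-expert language model and is more sensitive to weight quantization error.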