ayuzawa committed · verified
Commit bc35199 · 1 Parent(s): 1817c1b

Model save

README.md CHANGED

@@ -1,7 +1,7 @@
 ---
 library_name: peft
 license: llama3.2
-base_model: alpindale/Llama-3.2-11B-Vision-Instruct
+base_model: Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge
 tags:
 - generated_from_trainer
 model-index:
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # lora
 
-This model is a fine-tuned version of [alpindale/Llama-3.2-11B-Vision-Instruct](https://huggingface.co/alpindale/Llama-3.2-11B-Vision-Instruct) on an unknown dataset.
+This model is a fine-tuned version of [Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge](https://huggingface.co/Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge) on an unknown dataset.
 
 ## Model description
 
@@ -42,7 +42,7 @@ The following hyperparameters were used during training:
 - optimizer: Use OptimizerNames.ADAMW_HF with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: linear
 - lr_scheduler_warmup_steps: 2
-- num_epochs: 1
+- num_epochs: 10
 - mixed_precision_training: Native AMP
 
 ### Training results
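Besides the base-model swap above, the only substantive change in this file is the epoch count, raised from 1 to 10. For reference, a minimal sketch of how the card's listed hyperparameters map onto `transformers.TrainingArguments`; `output_dir` and any value not shown in the card are assumptions, not taken from this commit.

```python
# Hedged sketch: TrainingArguments matching the card's hyperparameters.
# Only the optimizer, scheduler, warmup steps, epoch count, and AMP flag
# come from the README diff; output_dir is assumed for illustration.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="lora",           # assumed, echoing the card's "# lora" title
    optim="adamw_hf",            # OptimizerNames.ADAMW_HF; betas=(0.9, 0.999), eps=1e-8 are its defaults
    lr_scheduler_type="linear",
    warmup_steps=2,
    num_train_epochs=10,         # raised from 1 to 10 in this commit
    fp16=True,                   # "Native AMP" mixed-precision training
)
```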
adapter_config.json CHANGED

@@ -4,7 +4,7 @@
     "base_model_class": "MllamaForConditionalGeneration",
     "parent_library": "transformers.models.mllama.modeling_mllama"
   },
-  "base_model_name_or_path": "alpindale/Llama-3.2-11B-Vision-Instruct",
+  "base_model_name_or_path": "Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge",
   "bias": "none",
   "eva_config": null,
   "exclude_modules": null,
@@ -27,12 +27,12 @@
   "revision": null,
   "target_modules": [
     "v_proj",
-    "q_proj",
-    "up_proj",
+    "gate_proj",
     "down_proj",
-    "k_proj",
+    "q_proj",
     "o_proj",
-    "gate_proj"
+    "k_proj",
+    "up_proj"
   ],
   "task_type": null,
   "use_dora": true,
adapter_model.safetensors CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9df8be3d456dcc78d7ad14050738589f046087b89f227f8e1da5ec26e032ee4b
+oid sha256:243d30a92ab2c69c6ebfc3e3822666593614ac485e4d253cd85dcdda4c4ac25d
 size 125866776
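The weights file is stored as a Git LFS pointer, so the diff shows only its metadata: the sha256 oid changes (new adapter weights) while the size is byte-identical, consistent with retraining an adapter of the same shape. A hedged sketch of loading the new adapter onto its base model; the adapter repo id below is a placeholder, not taken from this page.

```python
# Hedged sketch: attach the saved DoRA adapter to its (new) base model.
# "ayuzawa/lora" is a placeholder repo id, not confirmed by this commit.
from peft import PeftModel
from transformers import MllamaForConditionalGeneration

base = MllamaForConditionalGeneration.from_pretrained(
    "Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge"
)
model = PeftModel.from_pretrained(base, "ayuzawa/lora")  # placeholder id
```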
training_args.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1d23a1f32ab6bce0f981768bbac4bf8756b5e2cc4c71db976163be7a16401af7
+oid sha256:0a6e67a4a322b46ca40edd02fa7854e9b0bedb8e168b6cc5ad02c5482bc97734
 size 5304
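training_args.bin is the pickled `TrainingArguments` object that `Trainer` writes alongside each run; its oid changes here, presumably because `num_epochs` went from 1 to 10, while the pickle stays the same size. A hedged sketch of inspecting it locally; note that it is a full pickle, so only deserialize files you trust.

```python
# Hedged sketch: training_args.bin is a pickled TrainingArguments object.
# weights_only=False is required on recent torch because this is a full
# pickle; only load files from sources you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs)  # should report 10 after this commit
```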