ericrisco/llama2-instruct-tune-500s

Files changed:
- README.md +36 -37
- adapter_config.json +4 -1
- adapter_model.safetensors +1 -1
- runs/Sep26_13-01-01_3d63c6bcbcfd/events.out.tfevents.1727355710.3d63c6bcbcfd.3916.0 +3 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +1 -0
- training_args.bin +2 -2
README.md
CHANGED
@@ -1,12 +1,12 @@
 ---
+base_model: NousResearch/Llama-2-7b-hf
+datasets:
+- generator
 library_name: peft
 tags:
 - trl
 - sft
 - generated_from_trainer
-datasets:
-- generator
-base_model: NousResearch/Llama-2-7b-hf
 model-index:
 - name: llama2_instruct_generation
   results: []
@@ -19,7 +19,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [NousResearch/Llama-2-7b-hf](https://huggingface.co/NousResearch/Llama-2-7b-hf) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.
+- Loss: 1.6761
 
 ## Model description
 
@@ -44,44 +44,43 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: constant
-- lr_scheduler_warmup_steps: 0.03
 - training_steps: 500
 
 ### Training results
 
-| Training Loss | Epoch … (removed results table truncated in the diff view: header, separator, and 25 rows)
+| Training Loss | Epoch  | Step | Validation Loss |
+|:-------------:|:------:|:----:|:---------------:|
+| 1.9468        | 0.0027 | 20   | 1.8141          |
+| 1.8737        | 0.0054 | 40   | 1.7848          |
+| 1.8769        | 0.0081 | 60   | 1.7718          |
+| 1.8634        | 0.0108 | 80   | 1.7598          |
+| 1.8584        | 0.0135 | 100  | 1.7469          |
+| 1.8271        | 0.0163 | 120  | 1.7170          |
+| 1.8706        | 0.0190 | 140  | 1.7042          |
+| 1.8306        | 0.0217 | 160  | 1.7005          |
+| 1.7954        | 0.0244 | 180  | 1.6948          |
+| 1.8616        | 0.0271 | 200  | 1.6947          |
+| 1.81          | 0.0298 | 220  | 1.6915          |
+| 1.8003        | 0.0325 | 240  | 1.6900          |
+| 1.9069        | 0.0352 | 260  | 1.6880          |
+| 1.8266        | 0.0379 | 280  | 1.6868          |
+| 1.8615        | 0.0406 | 300  | 1.6849          |
+| 1.7728        | 0.0433 | 320  | 1.6832          |
+| 1.806         | 0.0461 | 340  | 1.6824          |
+| 1.8843        | 0.0488 | 360  | 1.6812          |
+| 1.7655        | 0.0515 | 380  | 1.6803          |
+| 1.812         | 0.0542 | 400  | 1.6795          |
+| 1.8058        | 0.0569 | 420  | 1.6779          |
+| 1.7424        | 0.0596 | 440  | 1.6779          |
+| 1.8976        | 0.0623 | 460  | 1.6782          |
+| 1.8237        | 0.0650 | 480  | 1.6778          |
+| 1.8981        | 0.0677 | 500  | 1.6761          |
 
 
 ### Framework versions
 
-- PEFT 0.
-- Transformers 4.
-- Pytorch 2.1
-- Datasets
-- Tokenizers 0.
+- PEFT 0.13.0
+- Transformers 4.45.0
+- Pytorch 2.4.1+cu121
+- Datasets 3.0.1
+- Tokenizers 0.20.0
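For context on reproducing these settings: the hyperparameters above map directly onto TRL's `SFTConfig`. Below is a minimal sketch, assuming a recent TRL release with the `SFTConfig`/`SFTTrainer` API; the learning rate, batch size, and dataset wiring are not visible in this diff, so those values are placeholders.

```python
# Sketch only: seed, scheduler, and step count come from the card above;
# everything marked "placeholder" is an assumption, not from this diff.
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer

args = SFTConfig(
    output_dir="llama2_instruct_generation",  # matches the model-index name
    max_steps=500,                  # training_steps: 500
    lr_scheduler_type="constant",   # per the card
    warmup_ratio=0.03,              # old card listed warmup "0.03", read here as a ratio
    seed=42,
    learning_rate=2e-4,             # placeholder: not shown in this diff
    per_device_train_batch_size=4,  # placeholder: not shown in this diff
)

# q_proj/v_proj targets per adapter_config.json below; r/alpha defaults apply.
peft_config = LoraConfig(task_type="CAUSAL_LM", target_modules=["q_proj", "v_proj"])

trainer = SFTTrainer(
    model="NousResearch/Llama-2-7b-hf",  # TRL accepts a model id string
    args=args,
    train_dataset=load_dataset("timdettmers/openassistant-guanaco")["train"],  # placeholder dataset
    peft_config=peft_config,
)
trainer.train()
```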
adapter_config.json
CHANGED
@@ -6,6 +6,7 @@
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
+  "layer_replication": null,
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
@@ -22,5 +23,7 @@
     "q_proj",
     "v_proj"
   ],
-  "task_type": "CAUSAL_LM"
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
 }
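The new `layer_replication`, `use_dora`, and `use_rslora` keys are simply fields that newer PEFT releases serialize by default; the diff shows them at their defaults, so the adapter is plain LoRA. A sketch of the equivalent `LoraConfig`, assuming PEFT 0.13.0 as listed in the card; rank and alpha sit outside the shown hunks, so those numbers are placeholders:

```python
from peft import LoraConfig

lora_config = LoraConfig(
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"],  # attention projections, per the diff
    r=16,            # placeholder: rank is outside the shown hunks
    lora_alpha=32,   # placeholder: alpha is outside the shown hunks
    use_dora=False,  # defaults, matching the serialized config above
    use_rslora=False,
)
```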
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c43dc04532b014d1c7de792f57ae7b7d5a3f8e6bd497a9677993c63d5ee08096
 size 134235048
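The safetensors payload itself lives in Git LFS; the diff only shows its pointer, whose `oid` is the SHA-256 of the real file. A small sketch for checking a downloaded copy against that oid (the local path is illustrative):

```python
import hashlib

def sha256_of(path: str) -> str:
    """Stream the file so large checkpoints don't need to fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "c43dc04532b014d1c7de792f57ae7b7d5a3f8e6bd497a9677993c63d5ee08096"
print(sha256_of("adapter_model.safetensors") == expected)  # True if intact
```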
runs/Sep26_13-01-01_3d63c6bcbcfd/events.out.tfevents.1727355710.3d63c6bcbcfd.3916.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05dd11a33f815e82e6781d7b1fcc26afd888ad6eeadc61afc9a09d4a2cacea44
+size 23452
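This is the TensorBoard event log for the run, also stored as an LFS pointer. One way to read it back, assuming the `tensorboard` package is installed; the scalar tag name below is a guess, since `Trainer` usually logs under `train/loss` and `eval/loss`:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Sep26_13-01-01_3d63c6bcbcfd")
acc.Reload()                  # parse the events file
print(acc.Tags()["scalars"])  # discover the actual tag names
for event in acc.Scalars("eval/loss"):  # assumed tag; adjust to Tags() output
    print(event.step, event.value)
```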
tokenizer.json
CHANGED
The diff for this file is too large to render. See raw diff.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json
CHANGED
@@ -1,6 +1,7 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
+  "add_prefix_space": null,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
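A quick way to confirm what the flags above mean in practice, assuming the repo id from the page header: `add_bos_token: true` makes the tokenizer prepend `<s>`, while `add_eos_token: false` leaves `</s>` off.

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ericrisco/llama2-instruct-tune-500s")
ids = tok("hello world")["input_ids"]
print(ids[0] == tok.bos_token_id)   # True: BOS is prepended
print(ids[-1] == tok.eos_token_id)  # False: no EOS is appended
```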
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:0d7d05ef4184ef5384d4cbcc938ea6aefb352a1c32c3276f279e3f32f1fe21f7
+size 5496
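`training_args.bin` is the pickled `TrainingArguments` object that `Trainer` saves alongside a run. A sketch for inspecting it; note that recent PyTorch defaults to `weights_only=True`, which rejects arbitrary pickles, hence the explicit flag (only do this for files you trust):

```python
import torch

# Unpickles transformers.TrainingArguments; requires transformers installed.
args = torch.load("training_args.bin", weights_only=False)
print(args.max_steps, args.lr_scheduler_type, args.seed)  # 500, "constant", 42
```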