ericrisco committed · verified
Commit a2cff51 · 1 Parent(s): 7af9388

ericrisco/llama2-instruct-tune-500s

README.md CHANGED
@@ -1,12 +1,12 @@
 ---
+base_model: NousResearch/Llama-2-7b-hf
+datasets:
+- generator
 library_name: peft
 tags:
 - trl
 - sft
 - generated_from_trainer
-datasets:
-- generator
-base_model: NousResearch/Llama-2-7b-hf
 model-index:
 - name: llama2_instruct_generation
   results: []
@@ -19,7 +19,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [NousResearch/Llama-2-7b-hf](https://huggingface.co/NousResearch/Llama-2-7b-hf) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.6737
+- Loss: 1.6761
 
 ## Model description
 
@@ -44,44 +44,43 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: constant
-- lr_scheduler_warmup_steps: 0.03
 - training_steps: 500
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss |
-|:-------------:|:-----:|:----:|:---------------:|
-| 1.9238        | 0.0   | 20   | 1.8060          |
-| 1.9181        | 0.01  | 40   | 1.7781          |
-| 1.8185        | 0.01  | 60   | 1.7652          |
-| 1.8336        | 0.01  | 80   | 1.7504          |
-| 1.8205        | 0.01  | 100  | 1.7275          |
-| 1.7673        | 0.02  | 120  | 1.7090          |
-| 1.7934        | 0.02  | 140  | 1.6996          |
-| 1.7844        | 0.02  | 160  | 1.6953          |
-| 1.7847        | 0.02  | 180  | 1.6932          |
-| 1.7772        | 0.03  | 200  | 1.6897          |
-| 1.7883        | 0.03  | 220  | 1.6896          |
-| 1.8346        | 0.03  | 240  | 1.6852          |
-| 1.6646        | 0.04  | 260  | 1.6834          |
-| 1.7433        | 0.04  | 280  | 1.6822          |
-| 1.7762        | 0.04  | 300  | 1.6809          |
-| 1.8508        | 0.04  | 320  | 1.6796          |
-| 1.7275        | 0.05  | 340  | 1.6791          |
-| 1.8002        | 0.05  | 360  | 1.6755          |
-| 1.855         | 0.05  | 380  | 1.6768          |
-| 1.7529        | 0.05  | 400  | 1.6751          |
-| 1.7728        | 0.06  | 420  | 1.6737          |
-| 1.8053        | 0.06  | 440  | 1.6737          |
-| 1.8579        | 0.06  | 460  | 1.6736          |
-| 1.7383        | 0.07  | 480  | 1.6735          |
-| 1.9282        | 0.07  | 500  | 1.6737          |
+| Training Loss | Epoch  | Step | Validation Loss |
+|:-------------:|:------:|:----:|:---------------:|
+| 1.9468        | 0.0027 | 20   | 1.8141          |
+| 1.8737        | 0.0054 | 40   | 1.7848          |
+| 1.8769        | 0.0081 | 60   | 1.7718          |
+| 1.8634        | 0.0108 | 80   | 1.7598          |
+| 1.8584        | 0.0135 | 100  | 1.7469          |
+| 1.8271        | 0.0163 | 120  | 1.7170          |
+| 1.8706        | 0.0190 | 140  | 1.7042          |
+| 1.8306        | 0.0217 | 160  | 1.7005          |
+| 1.7954        | 0.0244 | 180  | 1.6948          |
+| 1.8616        | 0.0271 | 200  | 1.6947          |
+| 1.81          | 0.0298 | 220  | 1.6915          |
+| 1.8003        | 0.0325 | 240  | 1.6900          |
+| 1.9069        | 0.0352 | 260  | 1.6880          |
+| 1.8266        | 0.0379 | 280  | 1.6868          |
+| 1.8615        | 0.0406 | 300  | 1.6849          |
+| 1.7728        | 0.0433 | 320  | 1.6832          |
+| 1.806         | 0.0461 | 340  | 1.6824          |
+| 1.8843        | 0.0488 | 360  | 1.6812          |
+| 1.7655        | 0.0515 | 380  | 1.6803          |
+| 1.812         | 0.0542 | 400  | 1.6795          |
+| 1.8058        | 0.0569 | 420  | 1.6779          |
+| 1.7424        | 0.0596 | 440  | 1.6779          |
+| 1.8976        | 0.0623 | 460  | 1.6782          |
+| 1.8237        | 0.0650 | 480  | 1.6778          |
+| 1.8981        | 0.0677 | 500  | 1.6761          |
 
 
 ### Framework versions
 
-- PEFT 0.7.1
-- Transformers 4.36.2
-- Pytorch 2.1.0+cu121
-- Datasets 2.16.1
-- Tokenizers 0.15.0
+- PEFT 0.13.0
+- Transformers 4.45.0
+- Pytorch 2.4.1+cu121
+- Datasets 3.0.1
+- Tokenizers 0.20.0
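
The updated card pins PEFT 0.13.0 and Transformers 4.45.0. As a minimal sketch (not part of the commit), the retrained adapter could be loaded for inference like this; the repo id comes from this commit's header, and the instruction prompt format is an assumption, since the card does not show one:

```python
# Minimal sketch: load the LoRA adapter from this repo for inference.
# The base model (NousResearch/Llama-2-7b-hf) is resolved from adapter_config.json.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

repo_id = "ericrisco/llama2-instruct-tune-500s"
model = AutoPeftModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

# Assumed instruction-style prompt; the card does not specify a template.
prompt = "### Instruction:\nExplain LoRA in one sentence.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```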
adapter_config.json CHANGED
@@ -6,6 +6,7 @@
   "fan_in_fan_out": false,
   "inference_mode": true,
   "init_lora_weights": true,
+  "layer_replication": null,
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
@@ -22,5 +23,7 @@
     "q_proj",
     "v_proj"
   ],
-  "task_type": "CAUSAL_LM"
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
 }
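
The three new keys (`layer_replication`, `use_dora`, `use_rslora`) are `LoraConfig` fields that newer PEFT releases serialize by default; the adapter's behavior is unchanged since all three hold their defaults. A sketch of a config that would round-trip to this JSON, with hypothetical rank/alpha values since the diff hunks do not show them:

```python
from peft import LoraConfig

config = LoraConfig(
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"],
    r=8,                # hypothetical rank; not visible in these hunks
    lora_alpha=16,      # hypothetical scaling; not visible in these hunks
    use_dora=False,     # weight-decomposed LoRA (DoRA), off by default
    use_rslora=False,   # rank-stabilized LoRA scaling, off by default
)
print(config.to_dict()["use_dora"])  # False
```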
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:899c28f6b49a4b08069ad1c2e1c95d0eac312b73b25d6bbb2f9b9bcebb8711a8
+oid sha256:c43dc04532b014d1c7de792f57ae7b7d5a3f8e6bd497a9677993c63d5ee08096
 size 134235048
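
Only the Git-LFS pointer changed here: a new sha256 for retrained weights of the same byte size. To materialize the actual file rather than the pointer, a download through `huggingface_hub` works; pinning `revision` to this commit is optional:

```python
# Sketch: fetch the real ~134 MB safetensors file behind the LFS pointer.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="ericrisco/llama2-instruct-tune-500s",
    filename="adapter_model.safetensors",
    revision="a2cff51",  # this commit; drop to fetch the latest revision
)
print(path)  # local cache path to the downloaded weights
```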
runs/Sep26_13-01-01_3d63c6bcbcfd/events.out.tfevents.1727355710.3d63c6bcbcfd.3916.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05dd11a33f815e82e6781d7b1fcc26afd888ad6eeadc61afc9a09d4a2cacea44
+size 23452
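
The added file is a TensorBoard event log from the training run (stored as an LFS pointer). A sketch of inspecting it offline with TensorBoard's `EventAccumulator`; the scalar tag names are not shown in this commit, so list them before reading:

```python
# Sketch: read training scalars from the added event file, assuming the
# run directory has been pulled locally (e.g. via git lfs or hf_hub_download).
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Sep26_13-01-01_3d63c6bcbcfd")
ea.Reload()                      # parse event file(s) under this directory
tags = ea.Tags()["scalars"]
print(tags)                      # actual scalar tag names in the log
for event in ea.Scalars(tags[0]):
    print(event.step, event.value)
```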
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json CHANGED
@@ -1,6 +1,7 @@
 {
   "add_bos_token": true,
   "add_eos_token": false,
+  "add_prefix_space": null,
   "added_tokens_decoder": {
     "0": {
       "content": "<unk>",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:17e7ba8e5f6bd62a4c225faedf20cbd892301185759ca884b25fa624a3f1aa2c
-size 4728
+oid sha256:0d7d05ef4184ef5384d4cbcc938ea6aefb352a1c32c3276f279e3f32f1fe21f7
+size 5496