ZHLiu627 committed
Commit a65e9a6 · verified · 1 Parent(s): 450b8c2

Model save

README.md CHANGED
@@ -2,16 +2,9 @@
 license: apache-2.0
 base_model: alignment-handbook/zephyr-7b-sft-full
 tags:
-- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
-- trl
-- dpo
-- generated_from_trainer
-datasets:
-- updated
-- original
 model-index:
 - name: zephyr-7b-dpo-full
   results: []
@@ -22,7 +15,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # zephyr-7b-dpo-full
 
-This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the updated and the original datasets.
+This model is a fine-tuned version of [alignment-handbook/zephyr-7b-sft-full](https://huggingface.co/alignment-handbook/zephyr-7b-sft-full) on the None dataset.
 
 ## Model description
 
@@ -61,7 +54,7 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- Transformers 4.38.2
+- Transformers 4.36.2
 - Pytorch 2.2.1+cu121
 - Datasets 2.14.6
 - Tokenizers 0.15.2
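
For orientation, a minimal sketch of loading the checkpoint this card describes for inference with `transformers`. The repo id is an assumption inferred from the committer and model name; it is not stated anywhere in the diff.

```python
# Minimal sketch: load the DPO-tuned checkpoint for inference.
# The repo id is assumed from the committer and model name; adjust as needed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "ZHLiu627/zephyr-7b-dpo-full"  # hypothetical repo id, not confirmed by the diff

tokenizer = AutoTokenizer.from_pretrained(repo_id)
# bfloat16 matches the "torch_dtype" recorded in config.json below.
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

prompt = "Explain direct preference optimization in one sentence."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```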
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
   "epoch": 1.0,
-  "train_loss": 0.686960334777832,
-  "train_runtime": 795.5553,
-  "train_samples": 900,
-  "train_samples_per_second": 1.131,
-  "train_steps_per_second": 0.031
+  "train_loss": 0.6818081367583502,
+  "train_runtime": 1329.5916,
+  "train_samples": 1500,
+  "train_samples_per_second": 1.128,
+  "train_steps_per_second": 0.032
 }
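
The new metrics are internally consistent; a quick sketch of the throughput arithmetic, using the values above plus the step count from `trainer_state.json` further down:

```python
# Cross-check the logged throughput against the raw sample count and runtime.
train_samples = 1500          # from all_results.json above
train_runtime = 1329.5916     # seconds
global_step = 42              # from trainer_state.json below

print(round(train_samples / train_runtime, 3))  # 1.128 -> matches train_samples_per_second
print(round(global_step / train_runtime, 3))    # 0.032 -> matches train_steps_per_second
```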
config.json CHANGED
@@ -20,7 +20,7 @@
   "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
-  "transformers_version": "4.38.2",
-  "use_cache": true,
+  "transformers_version": "4.36.2",
+  "use_cache": false,
   "vocab_size": 32000
 }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
   "_from_model_config": true,
   "bos_token_id": 1,
   "eos_token_id": 2,
-  "transformers_version": "4.38.2"
+  "transformers_version": "4.36.2"
 }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:168aea527589d0aa22f36d9c544f076047b12e4813c1b5dc31c89d4b7de2ff31
+oid sha256:3dbc16450b8385d558193d968d1a3489cdfa85b0eb4e37348f31c0c267ff267a
 size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f4eaec014f0066bdbf8c588a16ffd89772ad91b0199a26acd3354ea429dba0ac
+oid sha256:71f20b31ad6779e594346c1b98da9a5d7839ad16a131ccf7db462bebe1b273df
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:796fee778caa45be4448a1ab0b7afbe59dc4c37a3f555473404ca53439f1652a
+oid sha256:753b944c7793a85217d4f0b1e395a4f61df49699a0904522077061a880acd47f
 size 4540516344
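
The three entries above are Git LFS pointer files rather than the weights themselves; each records only the SHA-256 digest and byte size of the real shard. A small sketch of verifying a downloaded shard against its pointer, assuming the file sits in the current directory under the same name:

```python
# Verify a downloaded safetensors shard against its Git LFS pointer (oid + size).
import hashlib
import os

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's byte size and SHA-256 digest match the LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Values copied from the new pointer for model-00001-of-00003.safetensors above.
print(verify_lfs_object(
    "model-00001-of-00003.safetensors",
    "3dbc16450b8385d558193d968d1a3489cdfa85b0eb4e37348f31c0c267ff267a",
    4943162336,
))
```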
runs/Mar08_19-46-41_nebula/events.out.tfevents.1709948908.nebula.42292.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c99a89235d90876e288541ca01ea59ee93bc229c564826976b754b1b95d2d3cd
+size 5666
runs/Mar08_20-02-54_nebula/events.out.tfevents.1709949910.nebula.43699.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73334323b4a7acb3da8f653e28b3e591f70b1e48834fc1523646b475caf6602d
+size 8650
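
The two added files are TensorBoard event logs tracked through Git LFS. A small sketch of listing their contents, assuming the run directory has been downloaded locally and `tensorboard` is installed; the available tag names depend on what the trainer actually logged, so list them rather than assuming:

```python
# Inspect the scalars logged in one of the added TensorBoard event files.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Mar08_20-02-54_nebula")  # directory added in this commit
ea.Reload()

scalar_tags = ea.Tags()["scalars"]
print(scalar_tags)  # e.g. training loss and reward curves, whatever was written
for event in ea.Scalars(scalar_tags[0]):
    print(event.step, event.value)
```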
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
   "epoch": 1.0,
-  "train_loss": 0.686960334777832,
-  "train_runtime": 795.5553,
-  "train_samples": 900,
-  "train_samples_per_second": 1.131,
-  "train_steps_per_second": 0.031
+  "train_loss": 0.6818081367583502,
+  "train_runtime": 1329.5916,
+  "train_samples": 1500,
+  "train_samples_per_second": 1.128,
+  "train_steps_per_second": 0.032
 }
trainer_state.json CHANGED
@@ -3,21 +3,20 @@
   "best_model_checkpoint": null,
   "epoch": 1.0,
   "eval_steps": 500,
-  "global_step": 25,
+  "global_step": 42,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.04,
-      "grad_norm": 16.178188065123834,
-      "learning_rate": 1.6666666666666665e-07,
-      "logits/chosen": -2.86794376373291,
-      "logits/rejected": -2.7572906017303467,
-      "logps/chosen": -175.52011108398438,
-      "logps/pi_response": -211.910888671875,
-      "logps/ref_response": -211.910888671875,
-      "logps/rejected": -176.99070739746094,
+      "epoch": 0.02,
+      "learning_rate": 1e-07,
+      "logits/chosen": -2.961165428161621,
+      "logits/rejected": -2.9065260887145996,
+      "logps/chosen": -301.09039306640625,
+      "logps/pi_response": -154.45700073242188,
+      "logps/ref_response": -154.45700073242188,
+      "logps/rejected": -312.41326904296875,
       "loss": 0.6931,
       "rewards/accuracies": 0.0,
       "rewards/chosen": 0.0,
@@ -26,51 +25,81 @@
       "step": 1
     },
     {
-      "epoch": 0.4,
-      "grad_norm": 16.903841435721354,
-      "learning_rate": 3.851602043638994e-07,
-      "logits/chosen": -2.7485318183898926,
-      "logits/rejected": -2.6173155307769775,
-      "logps/chosen": -275.3558349609375,
-      "logps/pi_response": -155.98851013183594,
-      "logps/ref_response": -156.92300415039062,
-      "logps/rejected": -253.09535217285156,
-      "loss": 0.6914,
-      "rewards/accuracies": 0.4722222089767456,
-      "rewards/chosen": 0.010561762377619743,
-      "rewards/margins": 0.002931868424639106,
-      "rewards/rejected": 0.007629893720149994,
+      "epoch": 0.24,
+      "learning_rate": 4.778071225970339e-07,
+      "logits/chosen": -2.816866159439087,
+      "logits/rejected": -2.6957647800445557,
+      "logps/chosen": -267.56488037109375,
+      "logps/pi_response": -182.86256408691406,
+      "logps/ref_response": -182.52423095703125,
+      "logps/rejected": -226.00772094726562,
+      "loss": 0.6925,
+      "rewards/accuracies": 0.4444444477558136,
+      "rewards/chosen": -0.0020490202587097883,
+      "rewards/margins": 0.002598464023321867,
+      "rewards/rejected": -0.004647484514862299,
       "step": 10
     },
     {
-      "epoch": 0.8,
-      "grad_norm": 13.969736792308046,
-      "learning_rate": 6.106260641143546e-08,
-      "logits/chosen": -2.744602680206299,
-      "logits/rejected": -2.706644296646118,
-      "logps/chosen": -293.81134033203125,
-      "logps/pi_response": -228.12771606445312,
-      "logps/ref_response": -232.7391815185547,
-      "logps/rejected": -310.80322265625,
-      "loss": 0.6837,
-      "rewards/accuracies": 0.625,
-      "rewards/chosen": 0.0664653405547142,
-      "rewards/margins": 0.01980663277208805,
-      "rewards/rejected": 0.0466587133705616,
+      "epoch": 0.48,
+      "learning_rate": 3.2320569281913754e-07,
+      "logits/chosen": -2.7879672050476074,
+      "logits/rejected": -2.814840078353882,
+      "logps/chosen": -255.08975219726562,
+      "logps/pi_response": -235.3483428955078,
+      "logps/ref_response": -233.54464721679688,
+      "logps/rejected": -263.6041564941406,
+      "loss": 0.6868,
+      "rewards/accuracies": 0.7250000238418579,
+      "rewards/chosen": -0.014762332662940025,
+      "rewards/margins": 0.012134673073887825,
+      "rewards/rejected": -0.026897007599473,
       "step": 20
     },
+    {
+      "epoch": 0.71,
+      "learning_rate": 1.189231791106921e-07,
+      "logits/chosen": -2.702427387237549,
+      "logits/rejected": -2.7164032459259033,
+      "logps/chosen": -194.59146118164062,
+      "logps/pi_response": -153.37057495117188,
+      "logps/ref_response": -152.309814453125,
+      "logps/rejected": -225.66796875,
+      "loss": 0.6759,
+      "rewards/accuracies": 0.6499999761581421,
+      "rewards/chosen": 0.0013326064217835665,
+      "rewards/margins": 0.022419044747948647,
+      "rewards/rejected": -0.02108643762767315,
+      "step": 30
+    },
+    {
+      "epoch": 0.95,
+      "learning_rate": 3.5960224130728858e-09,
+      "logits/chosen": -2.7800514698028564,
+      "logits/rejected": -2.6079983711242676,
+      "logps/chosen": -291.4418029785156,
+      "logps/pi_response": -189.13177490234375,
+      "logps/ref_response": -187.91622924804688,
+      "logps/rejected": -244.4289093017578,
+      "loss": 0.6732,
+      "rewards/accuracies": 0.574999988079071,
+      "rewards/chosen": -0.02857491932809353,
+      "rewards/margins": 0.013249958865344524,
+      "rewards/rejected": -0.041824884712696075,
+      "step": 40
+    },
     {
       "epoch": 1.0,
-      "step": 25,
+      "step": 42,
       "total_flos": 0.0,
-      "train_loss": 0.686960334777832,
-      "train_runtime": 795.5553,
-      "train_samples_per_second": 1.131,
-      "train_steps_per_second": 0.031
+      "train_loss": 0.6818081367583502,
+      "train_runtime": 1329.5916,
+      "train_samples_per_second": 1.128,
+      "train_steps_per_second": 0.032
     }
   ],
   "logging_steps": 10,
-  "max_steps": 25,
+  "max_steps": 42,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 50,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d7e2f0ad6f5b91a2f546d4b6db7dc443ce15c0a03b6e3b63e7562dcbff75bdd2
-size 6136
+oid sha256:55111e9360cd23b86bd1915f7803c8b695ba266ed14c148454f14bcfd958a0a6
+size 6008