hllj committed
Commit 7c6429f · 1 Parent(s): e20b508

Model save

README.md ADDED
@@ -0,0 +1,65 @@
+ ---
+ license: mit
+ base_model: HuggingFaceH4/zephyr-7b-beta
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: sft-zephyr-7b-beta-v1
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # sft-zephyr-7b-beta-v1
+
+ This model is a fine-tuned version of [HuggingFaceH4/zephyr-7b-beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4405
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.05
+ - num_epochs: 3
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 0.8404        | 0.19  | 50   | 0.7357          |
+ | 0.5565        | 0.37  | 100  | 0.5301          |
+ | 0.5284        | 1.14  | 150  | 0.4864          |
+ | 0.4912        | 1.33  | 200  | 0.4633          |
+ | 0.4473        | 2.1   | 250  | 0.4545          |
+ | 0.4285        | 2.29  | 300  | 0.4483          |
+
+
+ ### Framework versions
+
+ - Transformers 4.35.2
+ - Pytorch 2.1.0
+ - Datasets 2.15.0
+ - Tokenizers 0.15.0
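
For anyone who wants to try the adapter described in this model card, here is a minimal inference sketch. The Hub ID `hllj/sft-zephyr-7b-beta-v1` is an assumption inferred from this commit (it is not stated in the diff), and `peft` is assumed to be installed alongside the framework versions listed above.

```python
# Minimal inference sketch (not part of this repo). Assumes the adapter is
# published as "hllj/sft-zephyr-7b-beta-v1", a hypothetical Hub ID.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "hllj/sft-zephyr-7b-beta-v1"  # assumption
tokenizer = AutoTokenizer.from_pretrained(adapter_id)

# Downloads the base model HuggingFaceH4/zephyr-7b-beta (per adapter_config.json
# below) and applies the LoRA weights on top.
model = AutoPeftModelForCausalLM.from_pretrained(adapter_id, torch_dtype=torch.bfloat16)

messages = [{"role": "user", "content": "Hello!"}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
outputs = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```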
adapter_config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "HuggingFaceH4/zephyr-7b-beta",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "o_proj",
+     "v_proj",
+     "k_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
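
In PEFT terms, this configuration is a rank-16 LoRA on the four attention projections. A sketch of the equivalent `LoraConfig`, with values copied from the JSON above; note that with `lora_alpha=32` and `r=16`, LoRA updates are scaled by `alpha/r = 2`.

```python
# Sketch of a LoraConfig mirroring adapter_config.json above.
from peft import LoraConfig

config = LoraConfig(
    r=16,             # LoRA rank
    lora_alpha=32,    # scaling numerator: updates are scaled by alpha/r = 2
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # attention projections
    task_type="CAUSAL_LM",
)
```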
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4043efd316e6c8591ddf522aab3caddae333fdb275ba9a1cbd2b45adfff8a345
+ size 54560368
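
This is a git-lfs pointer rather than the weights themselves; the actual ~54 MB safetensors blob lives in LFS storage, identified by its SHA-256. A downloaded copy can be checked against the pointer like this (a sketch; the local filename is an assumption):

```python
# Sketch: check a downloaded LFS blob against the pointer's oid and size.
import hashlib
from pathlib import Path

def verify_lfs_blob(path: str, expected_sha256: str, expected_size: int) -> bool:
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_sha256

# Values copied from the pointer above; the local path is hypothetical.
verify_lfs_blob(
    "adapter_model.safetensors",
    "4043efd316e6c8591ddf522aab3caddae333fdb275ba9a1cbd2b45adfff8a345",
    54560368,
)
```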
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "epoch": 2.41,
+   "eval_loss": 0.4404671788215637,
+   "eval_runtime": 10.292,
+   "eval_samples": 120,
+   "eval_samples_per_second": 11.66,
+   "eval_steps_per_second": 2.915,
+   "train_loss": 0.5767455007936861,
+   "train_runtime": 1186.2419,
+   "train_samples": 1076,
+   "train_samples_per_second": 2.721,
+   "train_steps_per_second": 0.68
+ }
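
Assuming `eval_loss` is the usual mean token-level cross-entropy (in nats), it corresponds to a perplexity of about 1.55:

```python
import math

# Perplexity implied by the eval_loss above (assumes token-level cross-entropy).
print(math.exp(0.4404671788215637))  # ~1.553
```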
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.41,
+   "eval_loss": 0.4404671788215637,
+   "eval_runtime": 10.292,
+   "eval_samples": 120,
+   "eval_samples_per_second": 11.66,
+   "eval_steps_per_second": 2.915
+ }
runs/Nov17_20-55-39_7a59b30c842e/events.out.tfevents.1700254679.7a59b30c842e.13548.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58e6cfe46e8fc46e63aa43763d938afc57d38010fac08253c4b73a33b87cc775
+ size 11570
runs/Nov17_20-55-39_7a59b30c842e/events.out.tfevents.1700255876.7a59b30c842e.13548.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0a79d9defde9adbc8f954b5d565a0a33cc5c5324e4d1e744d3c90655cb248fd
+ size 359
special_tokens_map.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
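
Note that `pad_token` reuses `</s>`: the underlying Llama tokenizer ships without a dedicated padding token, so the EOS token is the conventional stand-in. A sketch of the same mapping in code:

```python
from transformers import AutoTokenizer

# Mirrors the mapping above: reuse EOS ("</s>") as the padding token.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
tokenizer.pad_token = tokenizer.eos_token
```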
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<unk>",
+     "<s>",
+     "</s>"
+   ],
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "truncation_side": "left",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
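
The `chat_template` above is the Zephyr chat format: each turn becomes a `<|system|>`, `<|user|>`, or `<|assistant|>` header followed by the message content plus the EOS token, and a bare `<|assistant|>` is appended when a generation prompt is requested. A rendering sketch:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi!"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Output, roughly:
# <|system|>
# You are a helpful assistant.</s>
# <|user|>
# Hi!</s>
# <|assistant|>
```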
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 2.41,
+   "train_loss": 0.5767455007936861,
+   "train_runtime": 1186.2419,
+   "train_samples": 1076,
+   "train_samples_per_second": 2.721,
+   "train_steps_per_second": 0.68
+ }
trainer_state.json ADDED
@@ -0,0 +1,280 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 2.412639405204461,
+   "eval_steps": 50,
+   "global_step": 333,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0,
+       "learning_rate": 4.878048780487805e-07,
+       "loss": 1.1781,
+       "step": 1
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 4.8780487804878055e-06,
+       "loss": 1.1487,
+       "step": 10
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 9.756097560975611e-06,
+       "loss": 1.1115,
+       "step": 20
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 1.4634146341463415e-05,
+       "loss": 1.0121,
+       "step": 30
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 1.9512195121951222e-05,
+       "loss": 0.9286,
+       "step": 40
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 1.9993188419095562e-05,
+       "loss": 0.8404,
+       "step": 50
+     },
+     {
+       "epoch": 0.19,
+       "eval_loss": 0.7357456088066101,
+       "eval_runtime": 10.273,
+       "eval_samples_per_second": 11.681,
+       "eval_steps_per_second": 2.92,
+       "step": 50
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 1.9969654126764183e-05,
+       "loss": 0.7129,
+       "step": 60
+     },
+     {
+       "epoch": 0.26,
+       "learning_rate": 1.992935260059287e-05,
+       "loss": 0.6721,
+       "step": 70
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 1.9872351620777883e-05,
+       "loss": 0.6196,
+       "step": 80
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 1.9798747053108098e-05,
+       "loss": 0.5785,
+       "step": 90
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 1.9708662687735316e-05,
+       "loss": 0.5565,
+       "step": 100
+     },
+     {
+       "epoch": 0.37,
+       "eval_loss": 0.5301057696342468,
+       "eval_runtime": 10.2864,
+       "eval_samples_per_second": 11.666,
+       "eval_steps_per_second": 2.916,
+       "step": 100
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 1.9602250030980657e-05,
+       "loss": 0.5421,
+       "step": 110
+     },
+     {
+       "epoch": 1.03,
+       "learning_rate": 1.947968805052712e-05,
+       "loss": 0.5197,
+       "step": 120
+     },
+     {
+       "epoch": 1.07,
+       "learning_rate": 1.934118287442689e-05,
+       "loss": 0.508,
+       "step": 130
+     },
+     {
+       "epoch": 1.11,
+       "learning_rate": 1.9186967444429613e-05,
+       "loss": 0.5097,
+       "step": 140
+     },
+     {
+       "epoch": 1.14,
+       "learning_rate": 1.901730112421468e-05,
+       "loss": 0.5284,
+       "step": 150
+     },
+     {
+       "epoch": 1.14,
+       "eval_loss": 0.4864448308944702,
+       "eval_runtime": 10.291,
+       "eval_samples_per_second": 11.661,
+       "eval_steps_per_second": 2.915,
+       "step": 150
+     },
+     {
+       "epoch": 1.18,
+       "learning_rate": 1.8832469263186352e-05,
+       "loss": 0.5095,
+       "step": 160
+     },
+     {
+       "epoch": 1.22,
+       "learning_rate": 1.8632782716565438e-05,
+       "loss": 0.4823,
+       "step": 170
+     },
+     {
+       "epoch": 1.26,
+       "learning_rate": 1.841857732258457e-05,
+       "loss": 0.4792,
+       "step": 180
+     },
+     {
+       "epoch": 1.29,
+       "learning_rate": 1.8190213337666384e-05,
+       "loss": 0.4921,
+       "step": 190
+     },
+     {
+       "epoch": 1.33,
+       "learning_rate": 1.7948074830534535e-05,
+       "loss": 0.4912,
+       "step": 200
+     },
+     {
+       "epoch": 1.33,
+       "eval_loss": 0.4633093774318695,
+       "eval_runtime": 10.2883,
+       "eval_samples_per_second": 11.664,
+       "eval_steps_per_second": 2.916,
+       "step": 200
+     },
+     {
+       "epoch": 1.37,
+       "learning_rate": 1.7692569036276533e-05,
+       "loss": 0.4684,
+       "step": 210
+     },
+     {
+       "epoch": 1.41,
+       "learning_rate": 1.742412567144476e-05,
+       "loss": 0.4928,
+       "step": 220
+     },
+     {
+       "epoch": 2.03,
+       "learning_rate": 1.714319621134755e-05,
+       "loss": 0.4648,
+       "step": 230
+     },
+     {
+       "epoch": 2.07,
+       "learning_rate": 1.685025313074582e-05,
+       "loss": 0.4673,
+       "step": 240
+     },
+     {
+       "epoch": 2.1,
+       "learning_rate": 1.6545789109232247e-05,
+       "loss": 0.4473,
+       "step": 250
+     },
+     {
+       "epoch": 2.1,
+       "eval_loss": 0.45452865958213806,
+       "eval_runtime": 10.2907,
+       "eval_samples_per_second": 11.661,
+       "eval_steps_per_second": 2.915,
+       "step": 250
+     },
+     {
+       "epoch": 2.14,
+       "learning_rate": 1.6230316202629393e-05,
+       "loss": 0.4551,
+       "step": 260
+     },
+     {
+       "epoch": 2.18,
+       "learning_rate": 1.590436498180039e-05,
+       "loss": 0.4366,
+       "step": 270
+     },
+     {
+       "epoch": 2.22,
+       "learning_rate": 1.556848364032052e-05,
+       "loss": 0.4399,
+       "step": 280
+     },
+     {
+       "epoch": 2.25,
+       "learning_rate": 1.5223237072510433e-05,
+       "loss": 0.427,
+       "step": 290
+     },
+     {
+       "epoch": 2.29,
+       "learning_rate": 1.4904987486286184e-05,
+       "loss": 0.4285,
+       "step": 300
+     },
+     {
+       "epoch": 2.29,
+       "eval_loss": 0.44827380776405334,
+       "eval_runtime": 10.2956,
+       "eval_samples_per_second": 11.655,
+       "eval_steps_per_second": 2.914,
+       "step": 300
+     },
+     {
+       "epoch": 2.33,
+       "learning_rate": 1.454355889687053e-05,
+       "loss": 0.4233,
+       "step": 310
+     },
+     {
+       "epoch": 2.36,
+       "learning_rate": 1.4174488827267032e-05,
+       "loss": 0.4339,
+       "step": 320
+     },
+     {
+       "epoch": 2.4,
+       "learning_rate": 1.3798397989496549e-05,
+       "loss": 0.4381,
+       "step": 330
+     },
+     {
+       "epoch": 2.41,
+       "step": 333,
+       "total_flos": 5.790989170402918e+16,
+       "train_loss": 0.5767455007936861,
+       "train_runtime": 1186.2419,
+       "train_samples_per_second": 2.721,
+       "train_steps_per_second": 0.68
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 807,
+   "num_train_epochs": 3,
+   "save_steps": 500,
+   "total_flos": 5.790989170402918e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
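
The `log_history` traces the schedule from the README: the learning rate ramps linearly to its 2e-05 peak over the warmup steps (ceil(0.05 × 807) = 41, which matches the logged 1.9512e-05 at step 40, i.e. 40/41 of peak) and then follows a cosine decay. A sketch of constructing that schedule:

```python
import math

import torch
from transformers import get_cosine_schedule_with_warmup

max_steps = 807                # "max_steps" above
model = torch.nn.Linear(8, 8)  # stand-in for the PEFT-wrapped model
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5, betas=(0.9, 0.999), eps=1e-8)
scheduler = get_cosine_schedule_with_warmup(
    optimizer,
    num_warmup_steps=math.ceil(0.05 * max_steps),  # 41 warmup steps
    num_training_steps=max_steps,
)
```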
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75e1db26d33e0bbb23a35ece413c42f84f21154a34d690cc7ed6d255e921bcb6
+ size 4600