Token Classification · GLiNER · PyTorch · multilingual · NER · information extraction · encoder · entity recognition
Ihor committed (verified)
Commit e252929 · 1 Parent(s): 8971e6e

Upload folder using huggingface_hub
Files changed (3)
  1. gliner_config.json +3 -3
  2. pytorch_model.bin +1 -1
  3. trainer_state.json +523 -105
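For reference, the three changed files can be fetched at exactly this commit with huggingface_hub, the tool named in the commit message. A minimal sketch, assuming a placeholder repository id since the repo name is not shown on this page:

from huggingface_hub import hf_hub_download

REPO_ID = "namespace/model-name"  # placeholder; the actual repository id is not shown here
# "e252929" is the abbreviated commit hash above; the Hub may require the full 40-character hash.
for filename in ("gliner_config.json", "pytorch_model.bin", "trainer_state.json"):
    local_path = hf_hub_download(repo_id=REPO_ID, filename=filename, revision="e252929")
    print(local_path)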
gliner_config.json CHANGED
@@ -179,14 +179,14 @@
179
  "use_cache": true,
180
  "vocab_size": 30522
181
  },
182
- "log_dir": "biencoder/",
183
  "loss_alpha": -1,
184
  "loss_gamma": 0,
185
  "loss_reduction": "sum",
186
  "lr_encoder": "1e-5",
187
  "lr_others": "5e-5",
188
  "max_grad_norm": 10.0,
189
- "max_len": 386,
190
  "max_neg_type_ratio": 1,
191
  "max_types": 25,
192
  "max_width": 12,
@@ -206,7 +206,7 @@
206
  "span_mode": "markerV0",
207
  "subtoken_pooling": "first",
208
  "train_batch_size": 8,
209
- "train_data": "data.json",
210
  "transformers_version": "4.40.2",
211
  "val_data_dir": "none",
212
  "vocab_size": -1,
 
179
  "use_cache": true,
180
  "vocab_size": 30522
181
  },
182
+ "log_dir": "deberta/",
183
  "loss_alpha": -1,
184
  "loss_gamma": 0,
185
  "loss_reduction": "sum",
186
  "lr_encoder": "1e-5",
187
  "lr_others": "5e-5",
188
  "max_grad_norm": 10.0,
189
+ "max_len": 1024,
190
  "max_neg_type_ratio": 1,
191
  "max_types": 25,
192
  "max_width": 12,
 
206
  "span_mode": "markerV0",
207
  "subtoken_pooling": "first",
208
  "train_batch_size": 8,
209
+ "train_data": "data/nuner_train.json",
210
  "transformers_version": "4.40.2",
211
  "val_data_dir": "none",
212
  "vocab_size": -1,
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:13bb508286910c23fd3c0260759389521add1863bf71c4f468c9dec3c7d1aea5
3
  size 756513098
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35dc537205997fdcf5d9ced0ce976a62faacb84c1f3d6dec492e3fe0be333f75
3
  size 756513098
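Only the Git LFS pointer changed here: the oid (the SHA-256 of the weight file's contents) is new while the size, 756513098 bytes, is unchanged, consistent with retrained weights of the same shape. A minimal sketch, assuming pytorch_model.bin has already been downloaded locally, that verifies a download against the pointer:

import hashlib
import os

EXPECTED_OID = "35dc537205997fdcf5d9ced0ce976a62faacb84c1f3d6dec492e3fe0be333f75"
EXPECTED_SIZE = 756513098
PATH = "pytorch_model.bin"  # local download path; an assumption

sha = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

print("oid matches: ", sha.hexdigest() == EXPECTED_OID)
print("size matches:", os.path.getsize(PATH) == EXPECTED_SIZE)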
trainer_state.json CHANGED
@@ -1,177 +1,595 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 4.0558810274898605,
5
  "eval_steps": 500,
6
- "global_step": 9000,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.22532672374943669,
13
- "grad_norm": 1464.645751953125,
14
- "learning_rate": 1.6666666666666667e-06,
15
- "loss": 419.737,
16
  "step": 500
17
  },
18
  {
19
- "epoch": 0.45065344749887337,
20
- "grad_norm": 3850.259033203125,
21
- "learning_rate": 3.3333333333333333e-06,
22
- "loss": 377.5664,
23
  "step": 1000
24
  },
25
  {
26
- "epoch": 0.67598017124831,
27
- "grad_norm": 1552.3658447265625,
28
- "learning_rate": 5e-06,
29
- "loss": 359.1407,
30
  "step": 1500
31
  },
32
  {
33
- "epoch": 0.9013068949977467,
34
- "grad_norm": 1931.043212890625,
35
- "learning_rate": 6.666666666666667e-06,
36
- "loss": 363.6383,
37
  "step": 2000
38
  },
39
  {
40
- "epoch": 1.0,
41
- "eval_loss": 337.4689025878906,
42
- "eval_runtime": 13.0417,
43
- "eval_samples_per_second": 151.284,
44
- "eval_steps_per_second": 18.939,
45
- "step": 2219
46
  },
47
  {
48
- "epoch": 1.1266336187471835,
49
- "grad_norm": 1212.9539794921875,
50
- "learning_rate": 8.333333333333334e-06,
51
- "loss": 344.8475,
52
  "step": 2500
53
  },
54
  {
55
- "epoch": 1.35196034249662,
56
- "grad_norm": 1241.7996826171875,
57
- "learning_rate": 1e-05,
58
- "loss": 350.3084,
59
  "step": 3000
60
  },
61
  {
62
- "epoch": 1.5772870662460567,
63
- "grad_norm": 2845.786376953125,
64
- "learning_rate": 9.991540791356342e-06,
65
- "loss": 341.8033,
66
  "step": 3500
67
  },
68
  {
69
- "epoch": 1.8026137899954935,
70
- "grad_norm": 1714.59423828125,
71
- "learning_rate": 9.966191788709716e-06,
72
- "loss": 337.2936,
73
  "step": 4000
74
  },
75
  {
76
- "epoch": 2.0,
77
- "eval_loss": 326.2698974609375,
78
- "eval_runtime": 13.0537,
79
- "eval_samples_per_second": 151.145,
80
- "eval_steps_per_second": 18.922,
81
- "step": 4438
82
  },
83
  {
84
- "epoch": 2.0279405137449302,
85
- "grad_norm": 2061.15966796875,
86
- "learning_rate": 9.924038765061042e-06,
87
- "loss": 338.1808,
88
  "step": 4500
89
  },
90
  {
91
- "epoch": 2.253267237494367,
92
- "grad_norm": 1502.1851806640625,
93
- "learning_rate": 9.86522435289912e-06,
94
- "loss": 325.4306,
95
  "step": 5000
96
  },
97
  {
98
- "epoch": 2.4785939612438037,
99
- "grad_norm": 1462.711669921875,
100
- "learning_rate": 9.789947561577445e-06,
101
- "loss": 320.5629,
102
  "step": 5500
103
  },
104
  {
105
- "epoch": 2.70392068499324,
106
- "grad_norm": 3903.998046875,
107
- "learning_rate": 9.698463103929542e-06,
108
- "loss": 322.1042,
109
  "step": 6000
110
  },
111
  {
112
- "epoch": 2.9292474087426768,
113
- "grad_norm": 1786.909423828125,
114
- "learning_rate": 9.591080534401371e-06,
115
- "loss": 327.4969,
116
  "step": 6500
117
  },
118
  {
119
- "epoch": 3.0,
120
- "eval_loss": 320.3514099121094,
121
- "eval_runtime": 13.0727,
122
- "eval_samples_per_second": 150.926,
123
- "eval_steps_per_second": 18.894,
124
- "step": 6657
125
  },
126
  {
127
- "epoch": 3.1545741324921135,
128
- "grad_norm": 1130.3773193359375,
129
- "learning_rate": 9.468163201617063e-06,
130
- "loss": 300.7485,
131
  "step": 7000
132
  },
133
  {
134
- "epoch": 3.3799008562415502,
135
- "grad_norm": 1423.0567626953125,
136
- "learning_rate": 9.330127018922195e-06,
137
- "loss": 312.8204,
138
  "step": 7500
139
  },
140
  {
141
- "epoch": 3.605227579990987,
142
- "grad_norm": 1232.428955078125,
143
- "learning_rate": 9.177439057064684e-06,
144
- "loss": 307.7865,
145
- "step": 8000
146
  },
147
  {
148
- "epoch": 3.8305543037404237,
149
- "grad_norm": 1071.1146240234375,
150
- "learning_rate": 9.01061596377522e-06,
151
- "loss": 295.8372,
152
- "step": 8500
153
  },
154
  {
155
- "epoch": 4.0,
156
- "eval_loss": 318.1500244140625,
157
- "eval_runtime": 13.0462,
158
- "eval_samples_per_second": 151.232,
159
- "eval_steps_per_second": 18.933,
160
- "step": 8876
161
  },
162
  {
163
- "epoch": 4.0558810274898605,
164
- "grad_norm": 2371.276123046875,
165
- "learning_rate": 8.83022221559489e-06,
166
- "loss": 303.3605,
167
- "step": 9000
168
  }
169
  ],
170
- "logging_steps": 500,
171
- "max_steps": 30000,
172
  "num_input_tokens_seen": 0,
173
- "num_train_epochs": 14,
174
- "save_steps": 3000,
175
  "total_flos": 0.0,
176
  "train_batch_size": 8,
177
  "trial_name": null,
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 2.039255671679837,
5
  "eval_steps": 500,
6
+ "global_step": 8000,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.02549069589599796,
13
+ "grad_norm": 2598.015380859375,
14
+ "learning_rate": 2.5e-06,
15
+ "loss": 223.1421,
16
+ "step": 100
17
+ },
18
+ {
19
+ "epoch": 0.05098139179199592,
20
+ "grad_norm": 296.58673095703125,
21
+ "learning_rate": 5e-06,
22
+ "loss": 103.1948,
23
+ "step": 200
24
+ },
25
+ {
26
+ "epoch": 0.07647208768799388,
27
+ "grad_norm": 1187.214111328125,
28
+ "learning_rate": 7.500000000000001e-06,
29
+ "loss": 76.4616,
30
+ "step": 300
31
+ },
32
+ {
33
+ "epoch": 0.10196278358399184,
34
+ "grad_norm": 1285.8101806640625,
35
+ "learning_rate": 1e-05,
36
+ "loss": 75.5666,
37
+ "step": 400
38
+ },
39
+ {
40
+ "epoch": 0.1274534794799898,
41
+ "grad_norm": 661.5672607421875,
42
+ "learning_rate": 9.995728791936505e-06,
43
+ "loss": 64.9031,
44
  "step": 500
45
  },
46
  {
47
+ "epoch": 0.15294417537598776,
48
+ "grad_norm": 371.0738220214844,
49
+ "learning_rate": 9.98292246503335e-06,
50
+ "loss": 65.2197,
51
+ "step": 600
52
+ },
53
+ {
54
+ "epoch": 0.17843487127198573,
55
+ "grad_norm": 403.7045593261719,
56
+ "learning_rate": 9.961602898685225e-06,
57
+ "loss": 69.9518,
58
+ "step": 700
59
+ },
60
+ {
61
+ "epoch": 0.20392556716798368,
62
+ "grad_norm": 431.2552490234375,
63
+ "learning_rate": 9.931806517013612e-06,
64
+ "loss": 63.1313,
65
+ "step": 800
66
+ },
67
+ {
68
+ "epoch": 0.22941626306398163,
69
+ "grad_norm": 324.2401123046875,
70
+ "learning_rate": 9.893584226636773e-06,
71
+ "loss": 63.1745,
72
+ "step": 900
73
+ },
74
+ {
75
+ "epoch": 0.2549069589599796,
76
+ "grad_norm": 527.2928466796875,
77
+ "learning_rate": 9.847001329696653e-06,
78
+ "loss": 63.3554,
79
  "step": 1000
80
  },
81
  {
82
+ "epoch": 0.28039765485597756,
83
+ "grad_norm": 311.36553955078125,
84
+ "learning_rate": 9.792137412291265e-06,
85
+ "loss": 64.9263,
86
+ "step": 1100
87
+ },
88
+ {
89
+ "epoch": 0.3058883507519755,
90
+ "grad_norm": 1101.414794921875,
91
+ "learning_rate": 9.729086208503174e-06,
92
+ "loss": 66.4159,
93
+ "step": 1200
94
+ },
95
+ {
96
+ "epoch": 0.33137904664797346,
97
+ "grad_norm": 276.3797302246094,
98
+ "learning_rate": 9.657955440256396e-06,
99
+ "loss": 60.7144,
100
+ "step": 1300
101
+ },
102
+ {
103
+ "epoch": 0.35686974254397147,
104
+ "grad_norm": 403.9580383300781,
105
+ "learning_rate": 9.578866633275289e-06,
106
+ "loss": 66.2698,
107
+ "step": 1400
108
+ },
109
+ {
110
+ "epoch": 0.3823604384399694,
111
+ "grad_norm": 224.3489532470703,
112
+ "learning_rate": 9.491954909459895e-06,
113
+ "loss": 59.2869,
114
  "step": 1500
115
  },
116
  {
117
+ "epoch": 0.40785113433596737,
118
+ "grad_norm": 315.9915771484375,
119
+ "learning_rate": 9.397368756032445e-06,
120
+ "loss": 59.4493,
121
+ "step": 1600
122
+ },
123
+ {
124
+ "epoch": 0.4333418302319653,
125
+ "grad_norm": 286.55816650390625,
126
+ "learning_rate": 9.295269771849426e-06,
127
+ "loss": 60.4814,
128
+ "step": 1700
129
+ },
130
+ {
131
+ "epoch": 0.45883252612796327,
132
+ "grad_norm": 323.67498779296875,
133
+ "learning_rate": 9.185832391312644e-06,
134
+ "loss": 58.4063,
135
+ "step": 1800
136
+ },
137
+ {
138
+ "epoch": 0.4843232220239613,
139
+ "grad_norm": 375.53961181640625,
140
+ "learning_rate": 9.069243586350976e-06,
141
+ "loss": 57.9836,
142
+ "step": 1900
143
+ },
144
+ {
145
+ "epoch": 0.5098139179199592,
146
+ "grad_norm": 191.06105041503906,
147
+ "learning_rate": 8.94570254698197e-06,
148
+ "loss": 59.9295,
149
  "step": 2000
150
  },
151
  {
152
+ "epoch": 0.5353046138159572,
153
+ "grad_norm": 233.01463317871094,
154
+ "learning_rate": 8.815420340999034e-06,
155
+ "loss": 56.8298,
156
+ "step": 2100
157
+ },
158
+ {
159
+ "epoch": 0.5607953097119551,
160
+ "grad_norm": 277.080078125,
161
+ "learning_rate": 8.67861955336566e-06,
162
+ "loss": 58.9617,
163
+ "step": 2200
164
+ },
165
+ {
166
+ "epoch": 0.5862860056079531,
167
+ "grad_norm": 492.3611145019531,
168
+ "learning_rate": 8.535533905932739e-06,
169
+ "loss": 62.3042,
170
+ "step": 2300
171
+ },
172
+ {
173
+ "epoch": 0.611776701503951,
174
+ "grad_norm": 252.20265197753906,
175
+ "learning_rate": 8.386407858128707e-06,
176
+ "loss": 56.0719,
177
+ "step": 2400
178
  },
179
  {
180
+ "epoch": 0.637267397399949,
181
+ "grad_norm": 355.2184753417969,
182
+ "learning_rate": 8.231496189304704e-06,
183
+ "loss": 55.5271,
184
  "step": 2500
185
  },
186
  {
187
+ "epoch": 0.6627580932959469,
188
+ "grad_norm": 468.17657470703125,
189
+ "learning_rate": 8.071063563448341e-06,
190
+ "loss": 59.2408,
191
+ "step": 2600
192
+ },
193
+ {
194
+ "epoch": 0.688248789191945,
195
+ "grad_norm": 542.2239990234375,
196
+ "learning_rate": 7.905384077009693e-06,
197
+ "loss": 52.7214,
198
+ "step": 2700
199
+ },
200
+ {
201
+ "epoch": 0.7137394850879429,
202
+ "grad_norm": 538.4288330078125,
203
+ "learning_rate": 7.734740790612137e-06,
204
+ "loss": 59.0205,
205
+ "step": 2800
206
+ },
207
+ {
208
+ "epoch": 0.7392301809839409,
209
+ "grad_norm": 411.6714172363281,
210
+ "learning_rate": 7.559425245448006e-06,
211
+ "loss": 60.3362,
212
+ "step": 2900
213
+ },
214
+ {
215
+ "epoch": 0.7647208768799388,
216
+ "grad_norm": 310.0589904785156,
217
+ "learning_rate": 7.379736965185369e-06,
218
+ "loss": 52.7548,
219
  "step": 3000
220
  },
221
  {
222
+ "epoch": 0.7902115727759368,
223
+ "grad_norm": 224.0525665283203,
224
+ "learning_rate": 7.195982944236853e-06,
225
+ "loss": 49.1827,
226
+ "step": 3100
227
+ },
228
+ {
229
+ "epoch": 0.8157022686719347,
230
+ "grad_norm": 255.24600219726562,
231
+ "learning_rate": 7.008477123264849e-06,
232
+ "loss": 58.0803,
233
+ "step": 3200
234
+ },
235
+ {
236
+ "epoch": 0.8411929645679327,
237
+ "grad_norm": 490.8277282714844,
238
+ "learning_rate": 6.817539852819149e-06,
239
+ "loss": 59.3456,
240
+ "step": 3300
241
+ },
242
+ {
243
+ "epoch": 0.8666836604639306,
244
+ "grad_norm": 1017.12353515625,
245
+ "learning_rate": 6.6234973460234184e-06,
246
+ "loss": 52.9023,
247
+ "step": 3400
248
+ },
249
+ {
250
+ "epoch": 0.8921743563599286,
251
+ "grad_norm": 243.35829162597656,
252
+ "learning_rate": 6.426681121245527e-06,
253
+ "loss": 55.5555,
254
  "step": 3500
255
  },
256
  {
257
+ "epoch": 0.9176650522559265,
258
+ "grad_norm": 629.7377319335938,
259
+ "learning_rate": 6.227427435703997e-06,
260
+ "loss": 53.5047,
261
+ "step": 3600
262
+ },
263
+ {
264
+ "epoch": 0.9431557481519246,
265
+ "grad_norm": 511.2954406738281,
266
+ "learning_rate": 6.026076710978172e-06,
267
+ "loss": 51.0799,
268
+ "step": 3700
269
+ },
270
+ {
271
+ "epoch": 0.9686464440479226,
272
+ "grad_norm": 360.78289794921875,
273
+ "learning_rate": 5.82297295140367e-06,
274
+ "loss": 54.3857,
275
+ "step": 3800
276
+ },
277
+ {
278
+ "epoch": 0.9941371399439205,
279
+ "grad_norm": 400.1549987792969,
280
+ "learning_rate": 5.61846315634674e-06,
281
+ "loss": 51.9264,
282
+ "step": 3900
283
+ },
284
+ {
285
+ "epoch": 1.0,
286
+ "eval_loss": 3177.868896484375,
287
+ "eval_runtime": 10.4771,
288
+ "eval_samples_per_second": 332.821,
289
+ "eval_steps_per_second": 41.615,
290
+ "step": 3923
291
+ },
292
+ {
293
+ "epoch": 1.0196278358399185,
294
+ "grad_norm": 331.4139099121094,
295
+ "learning_rate": 5.412896727361663e-06,
296
+ "loss": 53.4844,
297
  "step": 4000
298
  },
299
  {
300
+ "epoch": 1.0451185317359164,
301
+ "grad_norm": 368.0908203125,
302
+ "learning_rate": 5.206624871244066e-06,
303
+ "loss": 58.5974,
304
+ "step": 4100
305
  },
306
  {
307
+ "epoch": 1.0706092276319144,
308
+ "grad_norm": 436.7818603515625,
309
+ "learning_rate": 5e-06,
310
+ "loss": 52.2036,
311
+ "step": 4200
312
+ },
313
+ {
314
+ "epoch": 1.0960999235279123,
315
+ "grad_norm": 370.1662902832031,
316
+ "learning_rate": 4.793375128755934e-06,
317
+ "loss": 57.4233,
318
+ "step": 4300
319
+ },
320
+ {
321
+ "epoch": 1.1215906194239103,
322
+ "grad_norm": 947.5311889648438,
323
+ "learning_rate": 4.587103272638339e-06,
324
+ "loss": 54.2669,
325
+ "step": 4400
326
+ },
327
+ {
328
+ "epoch": 1.1470813153199082,
329
+ "grad_norm": 219.2557830810547,
330
+ "learning_rate": 4.381536843653262e-06,
331
+ "loss": 52.8739,
332
  "step": 4500
333
  },
334
  {
335
+ "epoch": 1.1725720112159062,
336
+ "grad_norm": 219.25851440429688,
337
+ "learning_rate": 4.17702704859633e-06,
338
+ "loss": 49.8615,
339
+ "step": 4600
340
+ },
341
+ {
342
+ "epoch": 1.198062707111904,
343
+ "grad_norm": 268.1202087402344,
344
+ "learning_rate": 3.973923289021829e-06,
345
+ "loss": 52.5382,
346
+ "step": 4700
347
+ },
348
+ {
349
+ "epoch": 1.223553403007902,
350
+ "grad_norm": 300.58795166015625,
351
+ "learning_rate": 3.7725725642960047e-06,
352
+ "loss": 56.45,
353
+ "step": 4800
354
+ },
355
+ {
356
+ "epoch": 1.2490440989039,
357
+ "grad_norm": 596.579833984375,
358
+ "learning_rate": 3.573318878754475e-06,
359
+ "loss": 50.8593,
360
+ "step": 4900
361
+ },
362
+ {
363
+ "epoch": 1.274534794799898,
364
+ "grad_norm": 313.270751953125,
365
+ "learning_rate": 3.3765026539765832e-06,
366
+ "loss": 58.2715,
367
  "step": 5000
368
  },
369
  {
370
+ "epoch": 1.3000254906958961,
371
+ "grad_norm": 755.581298828125,
372
+ "learning_rate": 3.1824601471808504e-06,
373
+ "loss": 54.2177,
374
+ "step": 5100
375
+ },
376
+ {
377
+ "epoch": 1.3255161865918939,
378
+ "grad_norm": 184.0687713623047,
379
+ "learning_rate": 2.991522876735154e-06,
380
+ "loss": 49.5836,
381
+ "step": 5200
382
+ },
383
+ {
384
+ "epoch": 1.351006882487892,
385
+ "grad_norm": 264.9710693359375,
386
+ "learning_rate": 2.804017055763149e-06,
387
+ "loss": 48.2091,
388
+ "step": 5300
389
+ },
390
+ {
391
+ "epoch": 1.3764975783838898,
392
+ "grad_norm": 447.72174072265625,
393
+ "learning_rate": 2.6202630348146323e-06,
394
+ "loss": 47.0714,
395
+ "step": 5400
396
+ },
397
+ {
398
+ "epoch": 1.401988274279888,
399
+ "grad_norm": 397.1292419433594,
400
+ "learning_rate": 2.4405747545519966e-06,
401
+ "loss": 50.4874,
402
  "step": 5500
403
  },
404
  {
405
+ "epoch": 1.4274789701758859,
406
+ "grad_norm": 407.992431640625,
407
+ "learning_rate": 2.265259209387867e-06,
408
+ "loss": 48.07,
409
+ "step": 5600
410
+ },
411
+ {
412
+ "epoch": 1.4529696660718838,
413
+ "grad_norm": 370.75030517578125,
414
+ "learning_rate": 2.094615922990309e-06,
415
+ "loss": 52.0392,
416
+ "step": 5700
417
+ },
418
+ {
419
+ "epoch": 1.4784603619678818,
420
+ "grad_norm": 201.114013671875,
421
+ "learning_rate": 1.928936436551661e-06,
422
+ "loss": 52.9536,
423
+ "step": 5800
424
+ },
425
+ {
426
+ "epoch": 1.5039510578638797,
427
+ "grad_norm": 288.3627624511719,
428
+ "learning_rate": 1.7685038106952952e-06,
429
+ "loss": 51.5337,
430
+ "step": 5900
431
+ },
432
+ {
433
+ "epoch": 1.5294417537598777,
434
+ "grad_norm": 529.8703002929688,
435
+ "learning_rate": 1.6135921418712959e-06,
436
+ "loss": 53.8545,
437
  "step": 6000
438
  },
439
  {
440
+ "epoch": 1.5549324496558756,
441
+ "grad_norm": 266.9444885253906,
442
+ "learning_rate": 1.4644660940672628e-06,
443
+ "loss": 44.9202,
444
+ "step": 6100
445
+ },
446
+ {
447
+ "epoch": 1.5804231455518736,
448
+ "grad_norm": 481.2966613769531,
449
+ "learning_rate": 1.321380446634342e-06,
450
+ "loss": 48.9966,
451
+ "step": 6200
452
+ },
453
+ {
454
+ "epoch": 1.6059138414478715,
455
+ "grad_norm": 225.82679748535156,
456
+ "learning_rate": 1.1845796590009684e-06,
457
+ "loss": 52.9983,
458
+ "step": 6300
459
+ },
460
+ {
461
+ "epoch": 1.6314045373438695,
462
+ "grad_norm": 269.080810546875,
463
+ "learning_rate": 1.0542974530180327e-06,
464
+ "loss": 50.8469,
465
+ "step": 6400
466
+ },
467
+ {
468
+ "epoch": 1.6568952332398674,
469
+ "grad_norm": 181.23312377929688,
470
+ "learning_rate": 9.307564136490255e-07,
471
+ "loss": 51.3111,
472
  "step": 6500
473
  },
474
  {
475
+ "epoch": 1.6823859291358654,
476
+ "grad_norm": 288.37408447265625,
477
+ "learning_rate": 8.141676086873574e-07,
478
+ "loss": 51.2199,
479
+ "step": 6600
480
+ },
481
+ {
482
+ "epoch": 1.7078766250318633,
483
+ "grad_norm": 295.11163330078125,
484
+ "learning_rate": 7.047302281505735e-07,
485
+ "loss": 55.1311,
486
+ "step": 6700
487
+ },
488
+ {
489
+ "epoch": 1.7333673209278613,
490
+ "grad_norm": 606.4634399414062,
491
+ "learning_rate": 6.026312439675553e-07,
492
+ "loss": 50.0297,
493
+ "step": 6800
494
+ },
495
+ {
496
+ "epoch": 1.7588580168238592,
497
+ "grad_norm": 400.3865661621094,
498
+ "learning_rate": 5.080450905401057e-07,
499
+ "loss": 50.0489,
500
+ "step": 6900
501
  },
502
  {
503
+ "epoch": 1.7843487127198574,
504
+ "grad_norm": 709.5574340820312,
505
+ "learning_rate": 4.211333667247125e-07,
506
+ "loss": 55.2417,
507
  "step": 7000
508
  },
509
  {
510
+ "epoch": 1.8098394086158551,
511
+ "grad_norm": 219.62893676757812,
512
+ "learning_rate": 3.420445597436056e-07,
513
+ "loss": 49.912,
514
+ "step": 7100
515
+ },
516
+ {
517
+ "epoch": 1.8353301045118533,
518
+ "grad_norm": 568.156494140625,
519
+ "learning_rate": 2.7091379149682683e-07,
520
+ "loss": 54.6138,
521
+ "step": 7200
522
+ },
523
+ {
524
+ "epoch": 1.860820800407851,
525
+ "grad_norm": 825.6641845703125,
526
+ "learning_rate": 2.0786258770873647e-07,
527
+ "loss": 50.6318,
528
+ "step": 7300
529
+ },
530
+ {
531
+ "epoch": 1.8863114963038492,
532
+ "grad_norm": 344.0815124511719,
533
+ "learning_rate": 1.5299867030334815e-07,
534
+ "loss": 51.9353,
535
+ "step": 7400
536
+ },
537
+ {
538
+ "epoch": 1.911802192199847,
539
+ "grad_norm": 291.8919677734375,
540
+ "learning_rate": 1.0641577336322761e-07,
541
+ "loss": 47.0073,
542
  "step": 7500
543
  },
544
  {
545
+ "epoch": 1.937292888095845,
546
+ "grad_norm": 362.12451171875,
547
+ "learning_rate": 6.819348298638839e-08,
548
+ "loss": 53.6565,
549
+ "step": 7600
550
+ },
551
+ {
552
+ "epoch": 1.9627835839918428,
553
+ "grad_norm": 313.963623046875,
554
+ "learning_rate": 3.839710131477492e-08,
555
+ "loss": 52.6762,
556
+ "step": 7700
557
  },
558
  {
559
+ "epoch": 1.988274279887841,
560
+ "grad_norm": 187.05747985839844,
561
+ "learning_rate": 1.7077534966650767e-08,
562
+ "loss": 53.921,
563
+ "step": 7800
564
  },
565
  {
566
+ "epoch": 2.0,
567
+ "eval_loss": 2829.332275390625,
568
+ "eval_runtime": 10.5528,
569
+ "eval_samples_per_second": 330.433,
570
+ "eval_steps_per_second": 41.316,
571
+ "step": 7846
572
+ },
573
+ {
574
+ "epoch": 2.0137649757838387,
575
+ "grad_norm": 539.2352294921875,
576
+ "learning_rate": 4.2712080634949024e-09,
577
+ "loss": 52.3559,
578
+ "step": 7900
579
  },
580
  {
581
+ "epoch": 2.039255671679837,
582
+ "grad_norm": 650.2083129882812,
583
+ "learning_rate": 0.0,
584
+ "loss": 54.1146,
585
+ "step": 8000
586
  }
587
  ],
588
+ "logging_steps": 100,
589
+ "max_steps": 8000,
590
  "num_input_tokens_seen": 0,
591
+ "num_train_epochs": 3,
592
+ "save_steps": 2000,
593
  "total_flos": 0.0,
594
  "train_batch_size": 8,
595
  "trial_name": null,