great0001 committed on
Commit 2887f1a · verified · 1 Parent(s): 05064e8

Training in progress, step 39, checkpoint

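The `last-checkpoint/` layout and the "step 39" commit message match a `transformers.Trainer` run that pushes its newest checkpoint to the Hub. A minimal sketch of training arguments that could produce commits like this one; everything here is an assumption except `eval_steps=13`, `logging_steps=1`, and the train batch size of 2, which appear in `trainer_state.json` below:

```python
from transformers import TrainingArguments

# Sketch only: values marked below are assumptions, not read from this repo.
args = TrainingArguments(
    output_dir="outputs",              # assumed
    per_device_train_batch_size=2,     # matches "train_batch_size": 2
    eval_strategy="steps",             # called evaluation_strategy in older releases
    eval_steps=13,                     # matches "eval_steps": 13
    save_strategy="steps",             # assumed
    save_steps=13,                     # assumed; step 39 = 3 x 13
    logging_steps=1,                   # matches "logging_steps": 1
    push_to_hub=True,
    hub_strategy="checkpoint",         # uploads the newest checkpoint as last-checkpoint/
)
```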
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9068ab8b12910134390193df6e671ee9804adf662d99ae80242950652f33de89
+oid sha256:689e56aaf984fcdfc4b933e871b56d04cdd4fa8c7abff9a2d9129b074eb6cece
 size 125248064
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8a14b177ed9c9ec4660cb215498d18d35699f02daac2a5925d98f10b92bc75b5
+oid sha256:32b2b81bc20055e2789ae7a34d48bbd73ee0a8a7126174c9e6451384ec13a650
 size 64219860
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3fbb9dcf0da2d9b5d3a9949a2085ac919ae660b7274cef9931a3a5782eab72ae
+oid sha256:5bf85c58cca5aae7d759797e66cb053a57988fcfe19dedb65a2573f97c517485
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8712da1b2787df41952a507984ec77e0f72c59fac7ee6cf21606445686249de
+oid sha256:ccc7e73cc5879da996ace4c3a10d9efe08a100111973e801d61997747e95e982
 size 1064
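Each of the files above is tracked with Git LFS, so the diff only touches the three-line pointer (spec version, sha256 oid, byte size), not the binary payload itself. A small sketch, assuming a locally downloaded copy of the adapter file, that checks the blob against the oid recorded in its pointer:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex sha256, the value stored as the LFS pointer oid."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local path; the expected oid is the "+" line from the adapter_model diff above.
expected = "689e56aaf984fcdfc4b933e871b56d04cdd4fa8c7abff9a2d9129b074eb6cece"
assert sha256_of("last-checkpoint/adapter_model.safetensors") == expected
```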
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.008209014128976242,
+  "epoch": 0.012313521193464362,
   "eval_steps": 13,
-  "global_step": 26,
+  "global_step": 39,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -213,6 +213,105 @@
       "eval_samples_per_second": 18.112,
       "eval_steps_per_second": 9.056,
       "step": 26
+    },
+    {
+      "epoch": 0.008524745441629173,
+      "grad_norm": 2.8270161151885986,
+      "learning_rate": 0.00012334453638559057,
+      "loss": 4.3127,
+      "step": 27
+    },
+    {
+      "epoch": 0.008840476754282105,
+      "grad_norm": 2.343597173690796,
+      "learning_rate": 0.0001156434465040231,
+      "loss": 4.7903,
+      "step": 28
+    },
+    {
+      "epoch": 0.009156208066935038,
+      "grad_norm": 2.1331675052642822,
+      "learning_rate": 0.0001078459095727845,
+      "loss": 3.7894,
+      "step": 29
+    },
+    {
+      "epoch": 0.00947193937958797,
+      "grad_norm": 2.6220457553863525,
+      "learning_rate": 0.0001,
+      "loss": 4.2415,
+      "step": 30
+    },
+    {
+      "epoch": 0.009787670692240904,
+      "grad_norm": 2.525392532348633,
+      "learning_rate": 9.215409042721552e-05,
+      "loss": 4.3988,
+      "step": 31
+    },
+    {
+      "epoch": 0.010103402004893835,
+      "grad_norm": 2.459665298461914,
+      "learning_rate": 8.435655349597689e-05,
+      "loss": 4.202,
+      "step": 32
+    },
+    {
+      "epoch": 0.010419133317546767,
+      "grad_norm": 2.508035182952881,
+      "learning_rate": 7.66554636144095e-05,
+      "loss": 4.4926,
+      "step": 33
+    },
+    {
+      "epoch": 0.0107348646301997,
+      "grad_norm": 2.238208293914795,
+      "learning_rate": 6.909830056250527e-05,
+      "loss": 4.3429,
+      "step": 34
+    },
+    {
+      "epoch": 0.011050595942852633,
+      "grad_norm": 1.8518725633621216,
+      "learning_rate": 6.173165676349103e-05,
+      "loss": 3.7004,
+      "step": 35
+    },
+    {
+      "epoch": 0.011366327255505565,
+      "grad_norm": 2.1632683277130127,
+      "learning_rate": 5.4600950026045326e-05,
+      "loss": 3.6473,
+      "step": 36
+    },
+    {
+      "epoch": 0.011682058568158496,
+      "grad_norm": 2.4588687419891357,
+      "learning_rate": 4.7750143528405126e-05,
+      "loss": 4.118,
+      "step": 37
+    },
+    {
+      "epoch": 0.011997789880811429,
+      "grad_norm": 1.9152288436889648,
+      "learning_rate": 4.12214747707527e-05,
+      "loss": 3.8266,
+      "step": 38
+    },
+    {
+      "epoch": 0.012313521193464362,
+      "grad_norm": 2.8986527919769287,
+      "learning_rate": 3.5055195166981645e-05,
+      "loss": 4.5158,
+      "step": 39
+    },
+    {
+      "epoch": 0.012313521193464362,
+      "eval_loss": 0.9916089773178101,
+      "eval_runtime": 73.6951,
+      "eval_samples_per_second": 18.102,
+      "eval_steps_per_second": 9.051,
+      "step": 39
     }
   ],
   "logging_steps": 1,
@@ -232,7 +331,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8311338801561600.0,
+  "total_flos": 1.242743039852544e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null