ardaspear committed
Commit cf5c922 · verified · 1 Parent(s): 86be408

Training in progress, step 119, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ddc96c869e84d02f3d9fca584798f708f1ec256acf0fc2046f784f22bd75df4a
+oid sha256:c51bb1bd493fedab904030a7482e3c21bba78bd2533b6b08b9c73123b0a35d3a
 size 159967880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:41593cb66d0ade40fd58b3c15e0939e1fcdbe2ad92a21b6640acc77ded366dc4
+oid sha256:b99ee001fbe78e978d9a8949cfacde0ed77f2b37b4f24cd265a5eb26ee84e83d
 size 81730196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:047cafb3f6e46fce616fd6719483c734a62b1890d850060214a5a61bf44fb49d
+oid sha256:5232f719bd209899927dac4f0c85d3dc677af3c55e9d27e5ace57effddf642d1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fcbdf5cce354397b1cc7dbc75ae72cd1ce74fbf84991f656a8ae8c5ec4cf6c4c
+oid sha256:c91934808157be4b4581cbac88c1dcb8ab73e7092f7b8aa05c4fbac8ab77615f
 size 1064
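
The four binaries above are stored as Git LFS pointers: between commits only the sha256 oid changes, while the byte size and the actual payload (kept in LFS storage) stay separate from the git history. A minimal sketch of checking a locally materialized checkpoint file against its pointer, assuming the repo has been cloned and `git lfs pull` run; `verify_lfs_pointer` is a hypothetical helper, not part of this repo:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(path: Path, expected_oid: str, expected_size: int) -> bool:
    """Compare a locally materialized LFS file against the sha256 oid and size in its pointer."""
    if path.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with path.open("rb") as f:
        # Hash in 1 MiB chunks so large checkpoint files are not read into memory at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Pointer values taken from the new (+) side of this commit's adapter_model.safetensors diff.
ok = verify_lfs_pointer(
    Path("last-checkpoint/adapter_model.safetensors"),
    "c51bb1bd493fedab904030a7482e3c21bba78bd2533b6b08b9c73123b0a35d3a",
    159967880,
)
print("adapter_model.safetensors matches its LFS pointer:", ok)
```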
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.026179018286814244,
+  "epoch": 0.030542188001283284,
   "eval_steps": 17,
-  "global_step": 102,
+  "global_step": 119,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -301,6 +301,49 @@
       "eval_samples_per_second": 13.281,
       "eval_steps_per_second": 1.661,
       "step": 102
+    },
+    {
+      "epoch": 0.026948989412897015,
+      "grad_norm": 0.3292534053325653,
+      "learning_rate": 5e-05,
+      "loss": 2.2091,
+      "step": 105
+    },
+    {
+      "epoch": 0.02771896053897979,
+      "grad_norm": 0.34186315536499023,
+      "learning_rate": 4.7520812266338885e-05,
+      "loss": 2.2861,
+      "step": 108
+    },
+    {
+      "epoch": 0.02848893166506256,
+      "grad_norm": 0.3780612051486969,
+      "learning_rate": 4.504772348747687e-05,
+      "loss": 2.2936,
+      "step": 111
+    },
+    {
+      "epoch": 0.029258902791145333,
+      "grad_norm": 0.3421897888183594,
+      "learning_rate": 4.2586817614407895e-05,
+      "loss": 2.2416,
+      "step": 114
+    },
+    {
+      "epoch": 0.030028873917228104,
+      "grad_norm": 0.38082820177078247,
+      "learning_rate": 4.0144148627425993e-05,
+      "loss": 2.3025,
+      "step": 117
+    },
+    {
+      "epoch": 0.030542188001283284,
+      "eval_loss": 2.292694330215454,
+      "eval_runtime": 494.1273,
+      "eval_samples_per_second": 13.282,
+      "eval_steps_per_second": 1.662,
+      "step": 119
     }
   ],
   "logging_steps": 3,
@@ -320,7 +363,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.4865650885748326e+17,
+  "total_flos": 1.7469611905646592e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null