ardaspear committed on
Commit ac0b31e · verified · 1 Parent(s): b5ffc5e

Training in progress, step 170, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fcdf10964fbea563f7585add757a1eded783fdea527b5b19c6c581195c5c3eb6
+oid sha256:a75b8b4f060b70b4acbf15e2810188aa5e77f7b80896679941436511f37eca44
 size 159967880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d9ce8f5d2c229bb84c86a6f32a33598b0f8c59ea40233c2dca6776e522db316
+oid sha256:f67b104ef782f97707a79c624761edded35f0157553358c6b27c9b692cd99a3e
 size 81730196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8376b1a21f60d7891b89de2e0d8b9ab5dc44b12e00ba3ecc733ed71c6230518d
+oid sha256:03056d1c4ca5ebbca3689312a90ecc3659af7e67511e12c82f70b630038ea234
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7d99dc7a150ff6ed818c8735e9e9061e757b4b841b8d74bde2c9d7a2195ff136
+oid sha256:fab1f30942a5e2bb9a9e5cc3477ef48cdcd39a7b78f8a45a46db0926bdbf2b4f
 size 1064
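
The four files above are tracked through Git LFS, so the repository only stores a three-line pointer (version, oid, size) per file; this commit swaps each sha256 oid while the payload sizes stay the same. Below is a minimal sketch, assuming the binaries have already been fetched into last-checkpoint/, of checking a local file against the oid and size recorded in its pointer. The verify_lfs_pointer helper is hypothetical, not part of this repository or of the git-lfs tooling.

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, payload_path: Path) -> bool:
    """Hypothetical helper: compare a downloaded file against its Git LFS pointer.

    The pointer is the three-line text tracked by git (version / oid / size);
    the payload is the real binary fetched by `git lfs pull`.
    """
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    with open(payload_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    return (digest.hexdigest() == expected_oid
            and payload_path.stat().st_size == expected_size)

# Example with the new adapter weights from this commit (local path is an assumption):
pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:a75b8b4f060b70b4acbf15e2810188aa5e77f7b80896679941436511f37eca44\n"
    "size 159967880\n"
)
print(verify_lfs_pointer(pointer, Path("last-checkpoint/adapter_model.safetensors")))
```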
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.03926852743022137,
+  "epoch": 0.04363169714469041,
   "eval_steps": 17,
-  "global_step": 153,
+  "global_step": 170,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -444,6 +444,49 @@
       "eval_samples_per_second": 13.28,
       "eval_steps_per_second": 1.661,
       "step": 153
+    },
+    {
+      "epoch": 0.04003849855630414,
+      "grad_norm": 0.415403813123703,
+      "learning_rate": 1.2658926150792322e-05,
+      "loss": 2.3245,
+      "step": 156
+    },
+    {
+      "epoch": 0.04080846968238691,
+      "grad_norm": 0.37216442823410034,
+      "learning_rate": 1.1056136061894384e-05,
+      "loss": 2.2985,
+      "step": 159
+    },
+    {
+      "epoch": 0.04157844080846968,
+      "grad_norm": 0.3392238914966583,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 2.2315,
+      "step": 162
+    },
+    {
+      "epoch": 0.04234841193455245,
+      "grad_norm": 0.3757283091545105,
+      "learning_rate": 8.141676086873572e-06,
+      "loss": 2.2423,
+      "step": 165
+    },
+    {
+      "epoch": 0.04311838306063523,
+      "grad_norm": 0.3205427825450897,
+      "learning_rate": 6.837175952121306e-06,
+      "loss": 2.3192,
+      "step": 168
+    },
+    {
+      "epoch": 0.04363169714469041,
+      "eval_loss": 2.2842438220977783,
+      "eval_runtime": 494.3736,
+      "eval_samples_per_second": 13.275,
+      "eval_steps_per_second": 1.661,
+      "step": 170
     }
   ],
   "logging_steps": 3,
@@ -463,7 +506,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.2578649349750784e+17,
+  "total_flos": 2.528149496534139e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null