romainnn committed · verified
Commit baeab23 · 1 Parent(s): 5b0eeff

Training in progress, step 213, checkpoint

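The changed files below are the usual artifacts a Hugging Face Trainer checkpoint contains (LoRA adapter weights, optimizer and scheduler state, RNG state, and trainer_state.json). A minimal sketch of resuming training from such a folder, assuming it has been downloaded locally; the base-model wiring, dataset, and hyperparameters here are placeholders for illustration, not values taken from this commit:

```python
# Hedged sketch: resume a Trainer run from the saved "last-checkpoint" folder.
# Everything marked "placeholder" is an assumption, not part of this commit.
from datasets import load_dataset
from peft import AutoPeftModelForCausalLM
from transformers import Trainer, TrainingArguments

model = AutoPeftModelForCausalLM.from_pretrained("last-checkpoint")      # adapter + base model (assumed layout)
train_dataset = load_dataset("json", data_files="train.jsonl")["train"]  # placeholder dataset

trainer = Trainer(
    model=model,
    args=TrainingArguments(
        output_dir="miner_id_24",          # matches the "best_model_checkpoint" prefix below
        per_device_train_batch_size=4,     # matches "train_batch_size": 4 below
    ),
    train_dataset=train_dataset,
)

# Restores optimizer.pt, scheduler.pt, rng_state.pth and trainer_state.json,
# then continues from the recorded global_step (213).
trainer.train(resume_from_checkpoint="last-checkpoint")
```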
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:90d948f01888b537049c0b7f4cd265d9772f7c9fce56ca522794956b763856b0
+oid sha256:942f8465cabeb76dcc95e6f1376a7a2bd0f43607b0fd95e02ca0696335823d9e
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4ecb3abc7a79b3f6942aff7a32c9b7f831fa7e66b7e5696c754675ea496a238c
+oid sha256:55b820f6aca64ee05247f1dcfe684e8698516c64a44a9585c856f3f4c25ba1c7
 size 85723284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f72c80b8d0207539470b3350fbbb8ff868cabe1e94b611568dbb68ea2a70094f
+oid sha256:84cc23981c65e9e4b36610af3a7f8e0530e00fd931109c8f0f300b8809f05c8f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b0ecec6be335b6d48527a855a4479db9f3cb677f2a541e8d4da775c12fae434f
+oid sha256:f3089190bafc90731224612761031dd1ac152bc3d675652a9bd05e3a743d0eea
 size 1064
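All four files above are tracked with Git LFS, so the diff only touches the small pointer files: the oid line is the SHA-256 of the actual payload and size is its byte count. A quick, self-contained way to check that a downloaded payload matches its updated pointer (the local path is an assumption about where the checkpoint was unpacked):

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large checkpoints never sit fully in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the updated adapter_model.safetensors pointer above.
expected = "942f8465cabeb76dcc95e6f1376a7a2bd0f43607b0fd95e02ca0696335823d9e"
actual = sha256_of(Path("last-checkpoint/adapter_model.safetensors"))
print("match" if actual == expected else f"mismatch: {actual}")
```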
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 2.7617204189300537,
   "best_model_checkpoint": "miner_id_24/checkpoint-200",
-  "epoch": 0.5432937181663837,
+  "epoch": 0.5786078098471986,
   "eval_steps": 50,
-  "global_step": 200,
+  "global_step": 213,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1447,6 +1447,97 @@
       "eval_samples_per_second": 6.974,
       "eval_steps_per_second": 1.744,
       "step": 200
+    },
+    {
+      "epoch": 0.5460101867572156,
+      "grad_norm": 0.7055419087409973,
+      "learning_rate": 1.7194582631535617e-06,
+      "loss": 2.7168,
+      "step": 201
+    },
+    {
+      "epoch": 0.5487266553480475,
+      "grad_norm": 0.7580511569976807,
+      "learning_rate": 1.4454863088532388e-06,
+      "loss": 2.7299,
+      "step": 202
+    },
+    {
+      "epoch": 0.5514431239388795,
+      "grad_norm": 0.6726568341255188,
+      "learning_rate": 1.19511780643915e-06,
+      "loss": 2.8939,
+      "step": 203
+    },
+    {
+      "epoch": 0.5541595925297114,
+      "grad_norm": 0.6482507586479187,
+      "learning_rate": 9.684127182679526e-07,
+      "loss": 2.7221,
+      "step": 204
+    },
+    {
+      "epoch": 0.5568760611205433,
+      "grad_norm": 0.638670027256012,
+      "learning_rate": 7.654253393936439e-07,
+      "loss": 2.7499,
+      "step": 205
+    },
+    {
+      "epoch": 0.5595925297113752,
+      "grad_norm": 0.7101965546607971,
+      "learning_rate": 5.862042845640403e-07,
+      "loss": 2.7859,
+      "step": 206
+    },
+    {
+      "epoch": 0.5623089983022072,
+      "grad_norm": 0.7514301538467407,
+      "learning_rate": 4.307924765777682e-07,
+      "loss": 2.7718,
+      "step": 207
+    },
+    {
+      "epoch": 0.565025466893039,
+      "grad_norm": 0.7341988682746887,
+      "learning_rate": 2.9922713600439854e-07,
+      "loss": 2.8015,
+      "step": 208
+    },
+    {
+      "epoch": 0.567741935483871,
+      "grad_norm": 0.782926619052887,
+      "learning_rate": 1.915397722702217e-07,
+      "loss": 2.7818,
+      "step": 209
+    },
+    {
+      "epoch": 0.5704584040747029,
+      "grad_norm": 0.7334633469581604,
+      "learning_rate": 1.0775617611189503e-07,
+      "loss": 2.7087,
+      "step": 210
+    },
+    {
+      "epoch": 0.5731748726655348,
+      "grad_norm": 0.7129755020141602,
+      "learning_rate": 4.789641339963957e-08,
+      "loss": 2.7918,
+      "step": 211
+    },
+    {
+      "epoch": 0.5758913412563668,
+      "grad_norm": 0.707548201084137,
+      "learning_rate": 1.1974820331517312e-08,
+      "loss": 2.7835,
+      "step": 212
+    },
+    {
+      "epoch": 0.5786078098471986,
+      "grad_norm": 0.7628645300865173,
+      "learning_rate": 0.0,
+      "loss": 2.9418,
+      "step": 213
     }
   ],
   "logging_steps": 1,
@@ -1470,12 +1561,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.188874371905618e+18,
+  "total_flos": 1.2664015930376847e+18,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null