romainnn committed (verified)
Commit 3e5bd3f · 1 parent: c71bcec

Training in progress, step 113, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b6b787b9f89d02f7dc3b19bedf8767e6c4968a519b508b2c87dad6d80083b1ca
+oid sha256:5692f34c97b1100d6c75723da71e2b0f079d9b91e8a95fe6f494f4b63f6a10d6
 size 63592
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7d8fbc5adea491f72a0015774548c8af88a7276041f77ca09132f6b0b1933ebf
+oid sha256:e04b29f4c1c43c72a522148668f8ef9c6f68d772079c07760733a8fe27f5b10e
 size 132798
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b42a00279b6c46adb4b97489b480522f7ffc839a33b02d2036504646d2a860f
+oid sha256:b367c1d9dc53d9831965356fc9a49bb755995d4a290a4eb8463184b6c0ce43d6
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0c90fedf08c2ca11af554a410bf047de63425ab96dca555f6c49020443d209ed
+oid sha256:45b74f08c885c76380c4ac1d181f205527fac73fc64a3e940746e9c93cee703f
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 11.01180648803711,
   "best_model_checkpoint": "miner_id_24/checkpoint-100",
-  "epoch": 0.8859357696566998,
+  "epoch": 1.0066445182724253,
   "eval_steps": 100,
-  "global_step": 100,
+  "global_step": 113,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -723,6 +723,97 @@
       "eval_samples_per_second": 279.069,
       "eval_steps_per_second": 70.502,
       "step": 100
+    },
+    {
+      "epoch": 0.8947951273532669,
+      "grad_norm": 1.4452682733535767,
+      "learning_rate": 6.623751839046455e-06,
+      "loss": 88.1532,
+      "step": 101
+    },
+    {
+      "epoch": 0.9036544850498339,
+      "grad_norm": 1.3395559787750244,
+      "learning_rate": 5.575745090030138e-06,
+      "loss": 88.1788,
+      "step": 102
+    },
+    {
+      "epoch": 0.9125138427464009,
+      "grad_norm": 1.4853826761245728,
+      "learning_rate": 4.61557487563673e-06,
+      "loss": 88.1785,
+      "step": 103
+    },
+    {
+      "epoch": 0.9213732004429679,
+      "grad_norm": 1.385549545288086,
+      "learning_rate": 3.7441343776484117e-06,
+      "loss": 88.1602,
+      "step": 104
+    },
+    {
+      "epoch": 0.9302325581395349,
+      "grad_norm": 1.4303137063980103,
+      "learning_rate": 2.9622342385589254e-06,
+      "loss": 88.0615,
+      "step": 105
+    },
+    {
+      "epoch": 0.9390919158361019,
+      "grad_norm": 1.3606700897216797,
+      "learning_rate": 2.2706018074875045e-06,
+      "loss": 88.1822,
+      "step": 106
+    },
+    {
+      "epoch": 0.9479512735326688,
+      "grad_norm": 1.3698482513427734,
+      "learning_rate": 1.6698804635747579e-06,
+      "loss": 88.1556,
+      "step": 107
+    },
+    {
+      "epoch": 0.9568106312292359,
+      "grad_norm": 1.4697903394699097,
+      "learning_rate": 1.160629017490389e-06,
+      "loss": 88.1125,
+      "step": 108
+    },
+    {
+      "epoch": 0.9656699889258029,
+      "grad_norm": 1.2570511102676392,
+      "learning_rate": 7.433211916092142e-07,
+      "loss": 88.0736,
+      "step": 109
+    },
+    {
+      "epoch": 0.9745293466223699,
+      "grad_norm": 1.4869191646575928,
+      "learning_rate": 4.1834517933907467e-07,
+      "loss": 88.137,
+      "step": 110
+    },
+    {
+      "epoch": 0.9833887043189369,
+      "grad_norm": 1.265148401260376,
+      "learning_rate": 1.8600328401061629e-07,
+      "loss": 88.1054,
+      "step": 111
+    },
+    {
+      "epoch": 0.9922480620155039,
+      "grad_norm": 1.3339345455169678,
+      "learning_rate": 4.651163766484779e-08,
+      "loss": 88.0929,
+      "step": 112
+    },
+    {
+      "epoch": 1.0066445182724253,
+      "grad_norm": 1.3491634130477905,
+      "learning_rate": 0.0,
+      "loss": 88.1079,
+      "step": 113
     }
   ],
   "logging_steps": 1,
@@ -746,12 +837,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1497366528000.0,
+  "total_flos": 1692024176640.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null