abaddon182 committed
Commit a609baf · verified · 1 Parent(s): f298faa

Training in progress, step 1350, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:aad83e5c87e1dcf953fdaec4845f9dbfcbab932b40a21e2621e40f440205a802
+ oid sha256:9d30350e360f601a3511eb106d62c7ac2fcc119d79f328213767c37a43f5539e
  size 608282672
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2849eea5f408f1edc29baf9c75436ee7de7a40a6f0a4daf5a15f9e5296c825e6
+ oid sha256:1b5c59e2edeb18ac4034051ad4c28efbd378d75f7fef7fbe6d27dbf832382642
  size 168149074
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:379ffe2a6c1e7c30b5d2c9f431b8b3af760b3661da210b891dd64a757c20122d
+ oid sha256:d5b95f764d72ed6aa17d7197e22e16e0cc217530d7d3f18f36eafb2b4ceb1e32
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:beb0b0f4cb227409c25efa2d36f03edfc3a0032e3296f1707945c3a0c5611cc5
+ oid sha256:a866955cff9370cd3957339d6bf23f5ca8494fc491b0c5ef9330a9273b5d4460
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.7147510051727295,
- "best_model_checkpoint": "miner_id_24/checkpoint-1050",
- "epoch": 2.1534320323014806,
+ "best_metric": 0.7145504951477051,
+ "best_model_checkpoint": "miner_id_24/checkpoint-1350",
+ "epoch": 2.4226110363391653,
  "eval_steps": 150,
- "global_step": 1200,
+ "global_step": 1350,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -919,6 +919,119 @@
  "eval_samples_per_second": 9.862,
  "eval_steps_per_second": 1.239,
  "step": 1200
+ },
+ {
+ "epoch": 2.171377299237326,
+ "grad_norm": 5.972898483276367,
+ "learning_rate": 2.86474508437579e-06,
+ "loss": 2.8738,
+ "step": 1210
+ },
+ {
+ "epoch": 2.1893225661731717,
+ "grad_norm": 3.204808473587036,
+ "learning_rate": 2.67658249108603e-06,
+ "loss": 2.4511,
+ "step": 1220
+ },
+ {
+ "epoch": 2.2072678331090176,
+ "grad_norm": 3.517888307571411,
+ "learning_rate": 2.4942045588130504e-06,
+ "loss": 2.3504,
+ "step": 1230
+ },
+ {
+ "epoch": 2.225213100044863,
+ "grad_norm": 3.9954092502593994,
+ "learning_rate": 2.317696896481024e-06,
+ "loss": 2.649,
+ "step": 1240
+ },
+ {
+ "epoch": 2.2431583669807087,
+ "grad_norm": 4.540075302124023,
+ "learning_rate": 2.1471423574861643e-06,
+ "loss": 2.7798,
+ "step": 1250
+ },
+ {
+ "epoch": 2.2611036339165547,
+ "grad_norm": 5.863144397735596,
+ "learning_rate": 1.982621000804979e-06,
+ "loss": 2.8962,
+ "step": 1260
+ },
+ {
+ "epoch": 2.2790489008524,
+ "grad_norm": 3.230573892593384,
+ "learning_rate": 1.8242100534143065e-06,
+ "loss": 2.5946,
+ "step": 1270
+ },
+ {
+ "epoch": 2.2969941677882457,
+ "grad_norm": 3.4696829319000244,
+ "learning_rate": 1.6719838740406313e-06,
+ "loss": 2.3252,
+ "step": 1280
+ },
+ {
+ "epoch": 2.3149394347240917,
+ "grad_norm": 4.225548267364502,
+ "learning_rate": 1.5260139182558363e-06,
+ "loss": 2.6343,
+ "step": 1290
+ },
+ {
+ "epoch": 2.332884701659937,
+ "grad_norm": 4.56865930557251,
+ "learning_rate": 1.3863687049356465e-06,
+ "loss": 2.8092,
+ "step": 1300
+ },
+ {
+ "epoch": 2.3508299685957827,
+ "grad_norm": 5.7747039794921875,
+ "learning_rate": 1.25311378409661e-06,
+ "loss": 2.9523,
+ "step": 1310
+ },
+ {
+ "epoch": 2.3687752355316287,
+ "grad_norm": 3.258890390396118,
+ "learning_rate": 1.1263117061266677e-06,
+ "loss": 2.4122,
+ "step": 1320
+ },
+ {
+ "epoch": 2.3867205024674742,
+ "grad_norm": 3.551067352294922,
+ "learning_rate": 1.006021992423738e-06,
+ "loss": 2.3727,
+ "step": 1330
+ },
+ {
+ "epoch": 2.4046657694033198,
+ "grad_norm": 3.741065263748169,
+ "learning_rate": 8.923011074561405e-07,
+ "loss": 2.5788,
+ "step": 1340
+ },
+ {
+ "epoch": 2.4226110363391653,
+ "grad_norm": 4.731313705444336,
+ "learning_rate": 7.852024322579649e-07,
+ "loss": 2.6879,
+ "step": 1350
+ },
+ {
+ "epoch": 2.4226110363391653,
+ "eval_loss": 0.7145504951477051,
+ "eval_runtime": 95.2568,
+ "eval_samples_per_second": 9.858,
+ "eval_steps_per_second": 1.239,
+ "step": 1350
  }
  ],
  "logging_steps": 10,
@@ -933,7 +1046,7 @@
  "early_stopping_threshold": 0.0
  },
  "attributes": {
- "early_stopping_patience_counter": 1
+ "early_stopping_patience_counter": 0
  }
  },
  "TrainerControl": {
@@ -947,7 +1060,7 @@
  "attributes": {}
  }
  },
- "total_flos": 2.2309772145626972e+18,
+ "total_flos": 2.50754898332536e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null