lesso08 committed
Commit 751c88b · verified · 1 parent: 32fa1b5

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:80b0c069be1f99334549ee3e7ef82e4f991c65184bdb174892b650818ac6c42a
+oid sha256:f326f334eef37db7e81d1bdc92c8688331a0abc7cc3fb88f688626369d739878
 size 639691872
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3b1cbe3e1ad56eb729f9949a7f151384f75f7de2164ced739ee717f579afa50a
+oid sha256:dcb7967971c95e1cb3bec161bb5c36617b4c72d2f099ade3d966881a7c490888
 size 325339796
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3273e6662d5bc26ddd0f96c5fb57c8729c00be63cf0428e791e11030a9be6376
+oid sha256:25eaed32035452f61b530f05ddb1757ce562ec2a5b996368ee4ed21c4aef9551
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:14680b86f4f2c08a2d7e1e3f0cdd35571310973b8d85bb4aabd1acab6b3cccbe
+oid sha256:27d9d26262db0192551c63e8e2cb503f12fb8cb162ff6ce7f18ac6f7e9d328d6
 size 1064
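The *.safetensors, *.pt, and *.pth files above are stored via Git LFS, so the diff only touches their pointer files: the sha256 oid is the hash of the actual object and size is its byte length. A minimal verification sketch (standard library only; the path assumes this checkpoint has been downloaded locally, and the expected values are copied from the new adapter_model.safetensors pointer in this commit):

import hashlib
from pathlib import Path

# Expected values from the new LFS pointer for
# last-checkpoint/adapter_model.safetensors in this commit.
EXPECTED_OID = "f326f334eef37db7e81d1bdc92c8688331a0abc7cc3fb88f688626369d739878"
EXPECTED_SIZE = 639691872

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check that a downloaded file matches the sha256 oid and size in its LFS pointer."""
    p = Path(path)
    if p.stat().st_size != expected_size:
        return False
    h = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Example (path is hypothetical, pointing at a local download of the checkpoint):
# verify_lfs_object("last-checkpoint/adapter_model.safetensors", EXPECTED_OID, EXPECTED_SIZE)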
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 1.273123025894165,
   "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.0032591337222566243,
+  "epoch": 0.006518267444513249,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 14.515,
       "eval_steps_per_second": 3.629,
       "step": 50
+    },
+    {
+      "epoch": 0.003910960466707949,
+      "grad_norm": 2.3496737480163574,
+      "learning_rate": 0.0002077466612270217,
+      "loss": 1.0568,
+      "step": 60
+    },
+    {
+      "epoch": 0.004562787211159274,
+      "grad_norm": 2.499488353729248,
+      "learning_rate": 0.0002069878791491233,
+      "loss": 1.1359,
+      "step": 70
+    },
+    {
+      "epoch": 0.005214613955610598,
+      "grad_norm": 3.1299209594726562,
+      "learning_rate": 0.00020572735047631578,
+      "loss": 1.2264,
+      "step": 80
+    },
+    {
+      "epoch": 0.005866440700061924,
+      "grad_norm": 3.3837058544158936,
+      "learning_rate": 0.00020397121637758515,
+      "loss": 1.2004,
+      "step": 90
+    },
+    {
+      "epoch": 0.006518267444513249,
+      "grad_norm": 2.973175287246704,
+      "learning_rate": 0.00020172803256173445,
+      "loss": 1.545,
+      "step": 100
+    },
+    {
+      "epoch": 0.006518267444513249,
+      "eval_loss": 1.275679111480713,
+      "eval_runtime": 446.3022,
+      "eval_samples_per_second": 14.474,
+      "eval_steps_per_second": 3.619,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -72,7 +115,7 @@
         "early_stopping_threshold": 0.0
       },
       "attributes": {
-        "early_stopping_patience_counter": 0
+        "early_stopping_patience_counter": 1
       }
     },
     "TrainerControl": {
@@ -86,7 +129,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.0149182388043776e+16,
+  "total_flos": 2.0633874937675776e+16,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null