Training in progress, step 300, checkpoint
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:02bbdc360cbb830f85d05e9ac028118b9ca4752aacfbb134aa61809d90f843b5
 size 608282672
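The adapter weights themselves sit behind this LFS pointer. As a minimal sketch, assuming the LFS object has been pulled locally and that the file is a safetensors archive of adapter tensors (the filename suggests a PEFT adapter, but the diff alone does not confirm it), its contents can be inspected without loading a model:

# Minimal sketch: list the tensors stored in the updated adapter file.
# Assumes `git lfs pull` has replaced the pointer with the real object.
from safetensors import safe_open

with safe_open("last-checkpoint/adapter_model.safetensors", framework="pt") as f:
    for name in f.keys():
        t = f.get_tensor(name)
        print(name, tuple(t.shape), t.dtype)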
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9eb94cdfb1ed5e40eb9f4399ccd35e5eebc44a8a00d58c868825d15d571e1829
 size 168149074
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:48c9c3cc04dc8dae936a8ffd4ecbcd94617b17b93d527a7200c9ff2943f1f4eb
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:aec4f833cafad92101025ba04d07a12ef45e359710881ba3a74082c3b400620d
 size 1064
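Each of the four files above is tracked with Git LFS, so the diff only touches the three-line pointer (spec version, oid sha256:<hash>, size in bytes); the previous hashes are truncated in this view. A minimal sketch of checking a pulled object against its pointer, assuming the pointer text has been saved to a small side file (both paths below are hypothetical illustration paths):

# Minimal sketch: verify a downloaded file against a Git LFS pointer.
# pointer_path and object_path are hypothetical illustration paths.
import hashlib

def verify_lfs_object(pointer_path, object_path):
    lines = open(pointer_path, encoding="utf-8").read().splitlines()
    fields = dict(line.split(" ", 1) for line in lines if line)
    expected_oid = fields["oid"].split(":", 1)[1]   # strip the "sha256:" prefix
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    size = 0
    with open(object_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

print(verify_lfs_object("optimizer.pt.pointer", "last-checkpoint/optimizer.pt"))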
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.
-  "best_model_checkpoint": "miner_id_24/checkpoint-
-  "epoch": 0.
+  "best_metric": 0.7424979209899902,
+  "best_model_checkpoint": "miner_id_24/checkpoint-300",
+  "epoch": 0.5383580080753702,
   "eval_steps": 150,
-  "global_step":
+  "global_step": 300,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -128,6 +128,119 @@
       "eval_samples_per_second": 9.858,
       "eval_steps_per_second": 1.239,
       "step": 150
+    },
+    {
+      "epoch": 0.2871242709735307,
+      "grad_norm": 2.056764841079712,
+      "learning_rate": 2.9576011832620583e-05,
+      "loss": 2.7666,
+      "step": 160
+    },
+    {
+      "epoch": 0.3050695379093764,
+      "grad_norm": 2.3999085426330566,
+      "learning_rate": 2.9495873291870436e-05,
+      "loss": 2.8254,
+      "step": 170
+    },
+    {
+      "epoch": 0.32301480484522205,
+      "grad_norm": 2.938490390777588,
+      "learning_rate": 2.940893033074948e-05,
+      "loss": 3.0781,
+      "step": 180
+    },
+    {
+      "epoch": 0.34096007178106774,
+      "grad_norm": 3.2307329177856445,
+      "learning_rate": 2.9315223760628224e-05,
+      "loss": 3.2347,
+      "step": 190
+    },
+    {
+      "epoch": 0.35890533871691344,
+      "grad_norm": 10.07883358001709,
+      "learning_rate": 2.9214797567742036e-05,
+      "loss": 3.3451,
+      "step": 200
+    },
+    {
+      "epoch": 0.3768506056527591,
+      "grad_norm": 2.220188617706299,
+      "learning_rate": 2.9107698892543862e-05,
+      "loss": 2.5911,
+      "step": 210
+    },
+    {
+      "epoch": 0.39479587258860477,
+      "grad_norm": 2.3011841773986816,
+      "learning_rate": 2.8993978007576263e-05,
+      "loss": 2.6557,
+      "step": 220
+    },
+    {
+      "epoch": 0.4127411395244504,
+      "grad_norm": 2.9171500205993652,
+      "learning_rate": 2.8873688293873336e-05,
+      "loss": 2.9965,
+      "step": 230
+    },
+    {
+      "epoch": 0.4306864064602961,
+      "grad_norm": 3.5936508178710938,
+      "learning_rate": 2.874688621590339e-05,
+      "loss": 3.2309,
+      "step": 240
+    },
+    {
+      "epoch": 0.4486316733961418,
+      "grad_norm": 15.465261459350586,
+      "learning_rate": 2.861363129506436e-05,
+      "loss": 3.3962,
+      "step": 250
+    },
+    {
+      "epoch": 0.46657694033198743,
+      "grad_norm": 2.3160524368286133,
+      "learning_rate": 2.847398608174417e-05,
+      "loss": 2.676,
+      "step": 260
+    },
+    {
+      "epoch": 0.4845222072678331,
+      "grad_norm": 2.586505651473999,
+      "learning_rate": 2.832801612595937e-05,
+      "loss": 2.7848,
+      "step": 270
+    },
+    {
+      "epoch": 0.5024674742036788,
+      "grad_norm": 3.1157450675964355,
+      "learning_rate": 2.8175789946585697e-05,
+      "loss": 2.9969,
+      "step": 280
+    },
+    {
+      "epoch": 0.5204127411395244,
+      "grad_norm": 3.554219961166382,
+      "learning_rate": 2.801737899919502e-05,
+      "loss": 3.2118,
+      "step": 290
+    },
+    {
+      "epoch": 0.5383580080753702,
+      "grad_norm": 11.838674545288086,
+      "learning_rate": 2.7852857642513838e-05,
+      "loss": 3.19,
+      "step": 300
+    },
+    {
+      "epoch": 0.5383580080753702,
+      "eval_loss": 0.7424979209899902,
+      "eval_runtime": 95.2404,
+      "eval_samples_per_second": 9.859,
+      "eval_steps_per_second": 1.239,
+      "step": 300
     }
   ],
   "logging_steps": 10,
@@ -156,7 +269,7 @@
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 5.555972794535117e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
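The trainer_state.json update summarizes what this checkpoint contains: global_step has advanced to 300 (epoch ≈ 0.54), log_history gains one entry per 10 training steps plus the step-300 evaluation, and best_metric / best_model_checkpoint now point at checkpoint-300 because its eval_loss of about 0.7425 is the best recorded so far. A minimal sketch of summarizing that log offline, assuming the checkpoint directory layout shown in this commit:

# Minimal sketch: summarize the training log saved in the checkpoint.
import json

with open("last-checkpoint/trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]       # per-10-step entries
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]   # per-150-step entries

print("global step:", state["global_step"], "epoch:", round(state["epoch"], 4))
print("best eval loss:", state["best_metric"], "from", state["best_model_checkpoint"])
print("latest train loss:", train_logs[-1]["loss"], "latest eval loss:", eval_logs[-1]["eval_loss"])

The optimizer.pt, scheduler.pt, and rng_state.pth files updated alongside it carry the optimizer, learning-rate-scheduler, and RNG state, which is what lets a transformers Trainer pick the run back up from this directory (trainer.train(resume_from_checkpoint="last-checkpoint")) at step 300 instead of restarting from scratch.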