Pranay17 committed (verified) · Commit 7cebbf5 · 1 Parent(s): 952ccd2

Training in progress, step 2000, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c2f40d58562647fcfba8d16d7dbd10f38e675cf2ef968a6edcb3ae166f4689d
+oid sha256:9c223867a54003574441075cb1c2f7cf35a5f5d21c1b65f662fd6798e4d4a007
 size 42002584
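Both versions of this file are Git LFS pointers: the declared size stays at 42002584 bytes and only the sha256 object id changes. A minimal sketch (not part of the commit) for checking a downloaded copy of the adapter against the new pointer; the local path is an assumption:

import hashlib
import os

# Hypothetical local path; adjust to wherever the LFS object was fetched.
path = "last-checkpoint/adapter_model.safetensors"
expected_oid = "9c223867a54003574441075cb1c2f7cf35a5f5d21c1b65f662fd6798e4d4a007"
expected_size = 42002584

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("local file matches the new LFS pointer")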
last-checkpoint/global_step2000/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c49902c81a6dcc2e849480ea21c30986c3b4e4d380b2c0751058ef86be78d73
+size 251710672
last-checkpoint/global_step2000/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f14756e6336f379701f8203b5a82466933e6c72bda35d65de3883d4c0684d5d
+size 153747385
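The two files added under last-checkpoint/global_step2000/ are DeepSpeed ZeRO shards: the bf16 optimizer-state partition for data-parallel rank 0 and the rank-00 model states. If a plain consolidated state dict is needed, DeepSpeed's zero_to_fp32 utility can merge them; a minimal sketch, assuming DeepSpeed is installed and the checkpoint directory (including the latest tag file below) was downloaded intact:

from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Merge the ZeRO-partitioned shards under global_step2000/ into one fp32 state dict.
# "last-checkpoint" is the assumed local download path of this folder.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint", tag="global_step2000")
print(f"recovered {sum(v.numel() for v in state_dict.values())} parameters")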
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1500
+global_step2000
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d9f5d56c1ca14011dd7b001c1386b904a9bf2bc098727c58e8a26a0f73e026d3
+oid sha256:227788b816b8b06d238c8c63acd422670703268e5b2b75d7dc2a5e755c8acbb5
 size 14244
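The trainer_state.json diff below appends ten log_history entries for steps 1550 through 2000: training loss drifts from 0.1098 down to 0.0763 while the learning rate decays roughly linearly from about 1.23e-4 to 1.00e-4, and epoch, global_step, and total_flos are updated to match. A minimal sketch (not part of the commit) for printing that trajectory from a local copy of the file:

import json

# Hypothetical local copy of the updated checkpoint file.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(f"epoch {state['epoch']:.3f}, global_step {state['global_step']}")
for entry in state["log_history"]:
    if "loss" in entry:  # training-log entries; skip any eval-only records
        print(f"step {entry['step']:>5}  lr {entry['learning_rate']:.2e}  loss {entry['loss']:.4f}")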
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 8.547008547008547,
+  "epoch": 11.396011396011396,
   "eval_steps": 1000,
-  "global_step": 1500,
+  "global_step": 2000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -224,6 +224,76 @@
       "learning_rate": 0.00012522522522522524,
       "loss": 0.1031,
       "step": 1500
+    },
+    {
+      "epoch": 8.831908831908832,
+      "grad_norm": 2.925875663757324,
+      "learning_rate": 0.00012272272272272273,
+      "loss": 0.1098,
+      "step": 1550
+    },
+    {
+      "epoch": 9.116809116809117,
+      "grad_norm": 2.593219757080078,
+      "learning_rate": 0.00012022022022022023,
+      "loss": 0.0979,
+      "step": 1600
+    },
+    {
+      "epoch": 9.401709401709402,
+      "grad_norm": 2.457099199295044,
+      "learning_rate": 0.00011771771771771771,
+      "loss": 0.0917,
+      "step": 1650
+    },
+    {
+      "epoch": 9.686609686609687,
+      "grad_norm": 2.513124465942383,
+      "learning_rate": 0.00011521521521521521,
+      "loss": 0.0884,
+      "step": 1700
+    },
+    {
+      "epoch": 9.971509971509972,
+      "grad_norm": 0.9835783243179321,
+      "learning_rate": 0.00011271271271271271,
+      "loss": 0.0891,
+      "step": 1750
+    },
+    {
+      "epoch": 10.256410256410255,
+      "grad_norm": 1.4291648864746094,
+      "learning_rate": 0.0001102102102102102,
+      "loss": 0.0792,
+      "step": 1800
+    },
+    {
+      "epoch": 10.54131054131054,
+      "grad_norm": 0.974391758441925,
+      "learning_rate": 0.00010770770770770771,
+      "loss": 0.0834,
+      "step": 1850
+    },
+    {
+      "epoch": 10.826210826210826,
+      "grad_norm": 2.3604581356048584,
+      "learning_rate": 0.0001052052052052052,
+      "loss": 0.0829,
+      "step": 1900
+    },
+    {
+      "epoch": 11.11111111111111,
+      "grad_norm": 1.6176166534423828,
+      "learning_rate": 0.0001027027027027027,
+      "loss": 0.08,
+      "step": 1950
+    },
+    {
+      "epoch": 11.396011396011396,
+      "grad_norm": 1.9266570806503296,
+      "learning_rate": 0.0001002002002002002,
+      "loss": 0.0763,
+      "step": 2000
     }
   ],
   "logging_steps": 50,
@@ -243,7 +313,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.542048423326515e+16,
+  "total_flos": 6.055886956213043e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null