ardaspear committed on
Commit cdedf0a · verified · 1 Parent(s): 8f36dd6

Training in progress, step 63, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dd1b77ccf8b42aa0e5ba9d469d890d8760d777f73ea4f48371d6a250ffabcf58
+oid sha256:6a255cc5ce734bbe23f23c79f2b4078f279af75615ab2654732ef205ac08c7d6
 size 63592
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aa2c7cf2d949340085056d6769de47512b761e4c311f420d9c7f1ac0ac3afd2a
+oid sha256:7a2e8ea15401435695a1a6d7c6fa2077b7252bf3b3dc816e830f3582c7ce4bad
 size 132798
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1e9e72aa36a011dd8bb6eb4914d3ce04223a5c3d46913c8074bf89f9dd923bb
+oid sha256:1be0d95b349f4a5439b800db674fec71503f77dc49f4d60d939a137cc4996bad
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ec430a8fba90f7f39f74e916eb32712c363a0fd20bb4904251fce0eb82f2b9cf
+oid sha256:922634a168fad3088c2a461ec82359f2941891b1472f492b835996e27c3cba9d
 size 1064
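
The four binary checkpoint files above are tracked with Git LFS, so the repository only diffs their pointer files: a spec version line, the SHA-256 of the stored object, and its size in bytes. As a minimal sketch (standard library only; the file paths are hypothetical), a locally downloaded blob can be checked against such a pointer like this:

    import hashlib

    def parse_lfs_pointer(pointer_path):
        """Read a Git LFS pointer file into a dict of its space-separated fields."""
        fields = {}
        with open(pointer_path, "r", encoding="utf-8") as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    def verify_against_pointer(pointer_path, blob_path):
        """Check a local blob's SHA-256 and byte size against its LFS pointer."""
        fields = parse_lfs_pointer(pointer_path)
        expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
        expected_size = int(fields["size"])
        digest = hashlib.sha256()
        size = 0
        with open(blob_path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
                size += len(chunk)
        return digest.hexdigest() == expected_oid and size == expected_size

    # Hypothetical usage: pointer text on one side, downloaded weights on the other.
    # verify_against_pointer("adapter_model.pointer", "last-checkpoint/adapter_model.safetensors")

Note that every pointer above keeps the same size and only the object hash changes, which is what an in-place overwrite of the adapter weights, optimizer, RNG, and scheduler state at a new step looks like.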
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.018409613909486065,
+  "epoch": 0.021477882894400408,
   "eval_steps": 9,
-  "global_step": 54,
+  "global_step": 63,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -189,6 +189,35 @@
       "eval_samples_per_second": 485.988,
       "eval_steps_per_second": 60.785,
       "step": 54
+    },
+    {
+      "epoch": 0.019432370237790847,
+      "grad_norm": 0.4819900393486023,
+      "learning_rate": 2.3256088156396868e-05,
+      "loss": 44.3073,
+      "step": 57
+    },
+    {
+      "epoch": 0.02045512656609563,
+      "grad_norm": 0.5234776735305786,
+      "learning_rate": 2.0658795558326743e-05,
+      "loss": 44.3053,
+      "step": 60
+    },
+    {
+      "epoch": 0.021477882894400408,
+      "grad_norm": 0.5196526646614075,
+      "learning_rate": 1.8109066104575023e-05,
+      "loss": 44.3021,
+      "step": 63
+    },
+    {
+      "epoch": 0.021477882894400408,
+      "eval_loss": 11.077031135559082,
+      "eval_runtime": 10.1496,
+      "eval_samples_per_second": 486.817,
+      "eval_steps_per_second": 60.889,
+      "step": 63
     }
   ],
   "logging_steps": 3,
@@ -208,7 +237,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 404288962560.0,
+  "total_flos": 471670456320.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null