{
  "best_metric": 2.521301507949829,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.348280365694384,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006965607313887679,
      "grad_norm": 14.914785385131836,
      "learning_rate": 5e-05,
      "loss": 2.9946,
      "step": 1
    },
    {
      "epoch": 0.006965607313887679,
      "eval_loss": 3.8551180362701416,
      "eval_runtime": 39.8369,
      "eval_samples_per_second": 24.274,
      "eval_steps_per_second": 3.037,
      "step": 1
    },
    {
      "epoch": 0.013931214627775359,
      "grad_norm": 12.962854385375977,
      "learning_rate": 0.0001,
      "loss": 3.1224,
      "step": 2
    },
    {
      "epoch": 0.02089682194166304,
      "grad_norm": 14.299446105957031,
      "learning_rate": 9.989294616193017e-05,
      "loss": 3.2202,
      "step": 3
    },
    {
      "epoch": 0.027862429255550717,
      "grad_norm": 19.08392906188965,
      "learning_rate": 9.957224306869053e-05,
      "loss": 3.1768,
      "step": 4
    },
    {
      "epoch": 0.0348280365694384,
      "grad_norm": 27.249370574951172,
      "learning_rate": 9.903926402016153e-05,
      "loss": 3.1132,
      "step": 5
    },
    {
      "epoch": 0.04179364388332608,
      "grad_norm": 26.656770706176758,
      "learning_rate": 9.829629131445342e-05,
      "loss": 2.9958,
      "step": 6
    },
    {
      "epoch": 0.048759251197213756,
      "grad_norm": 32.521759033203125,
      "learning_rate": 9.73465064747553e-05,
      "loss": 2.9294,
      "step": 7
    },
    {
      "epoch": 0.055724858511101434,
      "grad_norm": 38.11032485961914,
      "learning_rate": 9.619397662556435e-05,
      "loss": 2.8461,
      "step": 8
    },
    {
      "epoch": 0.06269046582498912,
      "grad_norm": 42.11003112792969,
      "learning_rate": 9.484363707663442e-05,
      "loss": 2.8183,
      "step": 9
    },
    {
      "epoch": 0.0696560731388768,
      "grad_norm": 45.7680778503418,
      "learning_rate": 9.330127018922194e-05,
      "loss": 3.3967,
      "step": 10
    },
    {
      "epoch": 0.07662168045276448,
      "grad_norm": 34.14312744140625,
      "learning_rate": 9.157348061512727e-05,
      "loss": 3.5234,
      "step": 11
    },
    {
      "epoch": 0.08358728776665215,
      "grad_norm": 33.50540542602539,
      "learning_rate": 8.966766701456177e-05,
      "loss": 3.7347,
      "step": 12
    },
    {
      "epoch": 0.09055289508053983,
      "grad_norm": 26.83640480041504,
      "learning_rate": 8.759199037394887e-05,
      "loss": 3.4828,
      "step": 13
    },
    {
      "epoch": 0.09751850239442751,
      "grad_norm": 22.63720703125,
      "learning_rate": 8.535533905932738e-05,
      "loss": 2.7741,
      "step": 14
    },
    {
      "epoch": 0.10448410970831519,
      "grad_norm": 24.69816780090332,
      "learning_rate": 8.296729075500344e-05,
      "loss": 2.6105,
      "step": 15
    },
    {
      "epoch": 0.11144971702220287,
      "grad_norm": 21.516197204589844,
      "learning_rate": 8.043807145043604e-05,
      "loss": 2.5476,
      "step": 16
    },
    {
      "epoch": 0.11841532433609055,
      "grad_norm": 37.13068771362305,
      "learning_rate": 7.777851165098012e-05,
      "loss": 2.5517,
      "step": 17
    },
    {
      "epoch": 0.12538093164997824,
      "grad_norm": 17.183576583862305,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.4099,
      "step": 18
    },
    {
      "epoch": 0.13234653896386592,
      "grad_norm": 14.289627075195312,
      "learning_rate": 7.211443451095007e-05,
      "loss": 2.3682,
      "step": 19
    },
    {
      "epoch": 0.1393121462777536,
      "grad_norm": 9.297135353088379,
      "learning_rate": 6.91341716182545e-05,
      "loss": 2.3983,
      "step": 20
    },
    {
      "epoch": 0.14627775359164127,
      "grad_norm": 13.06031322479248,
      "learning_rate": 6.607197326515808e-05,
      "loss": 2.3584,
      "step": 21
    },
    {
      "epoch": 0.15324336090552895,
      "grad_norm": 35.48813247680664,
      "learning_rate": 6.294095225512603e-05,
      "loss": 2.808,
      "step": 22
    },
    {
      "epoch": 0.16020896821941663,
      "grad_norm": 52.057186126708984,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 3.1874,
      "step": 23
    },
    {
      "epoch": 0.1671745755333043,
      "grad_norm": 46.11458969116211,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 3.0909,
      "step": 24
    },
    {
      "epoch": 0.174140182847192,
      "grad_norm": 65.17389678955078,
      "learning_rate": 5.327015646150716e-05,
      "loss": 3.7957,
      "step": 25
    },
    {
      "epoch": 0.174140182847192,
      "eval_loss": 2.6668310165405273,
      "eval_runtime": 39.833,
      "eval_samples_per_second": 24.276,
      "eval_steps_per_second": 3.038,
      "step": 25
    },
    {
      "epoch": 0.18110579016107967,
      "grad_norm": 10.990416526794434,
      "learning_rate": 5e-05,
      "loss": 2.8065,
      "step": 26
    },
    {
      "epoch": 0.18807139747496734,
      "grad_norm": 8.937409400939941,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 2.4433,
      "step": 27
    },
    {
      "epoch": 0.19503700478885502,
      "grad_norm": 11.488622665405273,
      "learning_rate": 4.347369038899744e-05,
      "loss": 2.3725,
      "step": 28
    },
    {
      "epoch": 0.2020026121027427,
      "grad_norm": 9.3021879196167,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 2.3004,
      "step": 29
    },
    {
      "epoch": 0.20896821941663038,
      "grad_norm": 8.825725555419922,
      "learning_rate": 3.705904774487396e-05,
      "loss": 2.2908,
      "step": 30
    },
    {
      "epoch": 0.21593382673051806,
      "grad_norm": 8.21666431427002,
      "learning_rate": 3.392802673484193e-05,
      "loss": 2.2499,
      "step": 31
    },
    {
      "epoch": 0.22289943404440574,
      "grad_norm": 10.44888687133789,
      "learning_rate": 3.086582838174551e-05,
      "loss": 2.2541,
      "step": 32
    },
    {
      "epoch": 0.22986504135829341,
      "grad_norm": 12.075174331665039,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 2.299,
      "step": 33
    },
    {
      "epoch": 0.2368306486721811,
      "grad_norm": 13.294587135314941,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.3522,
      "step": 34
    },
    {
      "epoch": 0.24379625598606877,
      "grad_norm": 22.585355758666992,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 2.9737,
      "step": 35
    },
    {
      "epoch": 0.2507618632999565,
      "grad_norm": 34.31892395019531,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 2.6606,
      "step": 36
    },
    {
      "epoch": 0.25772747061384416,
      "grad_norm": 36.366092681884766,
      "learning_rate": 1.703270924499656e-05,
      "loss": 3.1274,
      "step": 37
    },
    {
      "epoch": 0.26469307792773183,
      "grad_norm": 23.1158390045166,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 3.3618,
      "step": 38
    },
    {
      "epoch": 0.2716586852416195,
      "grad_norm": 6.393037796020508,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 2.5871,
      "step": 39
    },
    {
      "epoch": 0.2786242925555072,
      "grad_norm": 7.840436935424805,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 2.2926,
      "step": 40
    },
    {
      "epoch": 0.28558989986939487,
      "grad_norm": 8.410822868347168,
      "learning_rate": 8.426519384872733e-06,
      "loss": 2.2895,
      "step": 41
    },
    {
      "epoch": 0.29255550718328255,
      "grad_norm": 9.684894561767578,
      "learning_rate": 6.698729810778065e-06,
      "loss": 2.2933,
      "step": 42
    },
    {
      "epoch": 0.2995211144971702,
      "grad_norm": 11.069299697875977,
      "learning_rate": 5.156362923365588e-06,
      "loss": 2.2368,
      "step": 43
    },
    {
      "epoch": 0.3064867218110579,
      "grad_norm": 13.160449028015137,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 2.1627,
      "step": 44
    },
    {
      "epoch": 0.3134523291249456,
      "grad_norm": 13.812862396240234,
      "learning_rate": 2.653493525244721e-06,
      "loss": 2.2141,
      "step": 45
    },
    {
      "epoch": 0.32041793643883326,
      "grad_norm": 16.305538177490234,
      "learning_rate": 1.70370868554659e-06,
      "loss": 2.2591,
      "step": 46
    },
    {
      "epoch": 0.32738354375272094,
      "grad_norm": 23.405792236328125,
      "learning_rate": 9.607359798384785e-07,
      "loss": 2.7118,
      "step": 47
    },
    {
      "epoch": 0.3343491510666086,
      "grad_norm": 32.53597640991211,
      "learning_rate": 4.277569313094809e-07,
      "loss": 2.7714,
      "step": 48
    },
    {
      "epoch": 0.3413147583804963,
      "grad_norm": 41.75061798095703,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 2.5721,
      "step": 49
    },
    {
      "epoch": 0.348280365694384,
      "grad_norm": 44.7559814453125,
      "learning_rate": 0.0,
      "loss": 3.4154,
      "step": 50
    },
    {
      "epoch": 0.348280365694384,
      "eval_loss": 2.521301507949829,
      "eval_runtime": 39.8939,
      "eval_samples_per_second": 24.239,
      "eval_steps_per_second": 3.033,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.259156315439104e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}