{
  "best_metric": 1.390590786933899,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.1564945226917058,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003129890453834116,
      "grad_norm": 83.8933334350586,
      "learning_rate": 1e-05,
      "loss": 10.5413,
      "step": 1
    },
    {
      "epoch": 0.003129890453834116,
      "eval_loss": 2.9433071613311768,
      "eval_runtime": 49.9776,
      "eval_samples_per_second": 10.785,
      "eval_steps_per_second": 2.701,
      "step": 1
    },
    {
      "epoch": 0.006259780907668232,
      "grad_norm": 73.74372863769531,
      "learning_rate": 2e-05,
      "loss": 10.704,
      "step": 2
    },
    {
      "epoch": 0.009389671361502348,
      "grad_norm": 49.65017318725586,
      "learning_rate": 3e-05,
      "loss": 10.9243,
      "step": 3
    },
    {
      "epoch": 0.012519561815336464,
      "grad_norm": 42.6108512878418,
      "learning_rate": 4e-05,
      "loss": 9.9771,
      "step": 4
    },
    {
      "epoch": 0.01564945226917058,
      "grad_norm": 35.75225830078125,
      "learning_rate": 5e-05,
      "loss": 8.5201,
      "step": 5
    },
    {
      "epoch": 0.018779342723004695,
      "grad_norm": 34.45755386352539,
      "learning_rate": 6e-05,
      "loss": 8.7165,
      "step": 6
    },
    {
      "epoch": 0.02190923317683881,
      "grad_norm": 35.77829360961914,
      "learning_rate": 7e-05,
      "loss": 8.9756,
      "step": 7
    },
    {
      "epoch": 0.025039123630672927,
      "grad_norm": 37.6871337890625,
      "learning_rate": 8e-05,
      "loss": 8.8051,
      "step": 8
    },
    {
      "epoch": 0.028169014084507043,
      "grad_norm": 41.72507858276367,
      "learning_rate": 9e-05,
      "loss": 8.6268,
      "step": 9
    },
    {
      "epoch": 0.03129890453834116,
      "grad_norm": 31.615625381469727,
      "learning_rate": 0.0001,
      "loss": 7.8727,
      "step": 10
    },
    {
      "epoch": 0.03442879499217527,
      "grad_norm": 33.186031341552734,
      "learning_rate": 9.999316524962345e-05,
      "loss": 9.2117,
      "step": 11
    },
    {
      "epoch": 0.03755868544600939,
      "grad_norm": 33.494564056396484,
      "learning_rate": 9.997266286704631e-05,
      "loss": 7.0062,
      "step": 12
    },
    {
      "epoch": 0.0406885758998435,
      "grad_norm": 31.77089500427246,
      "learning_rate": 9.993849845741524e-05,
      "loss": 7.8004,
      "step": 13
    },
    {
      "epoch": 0.04381846635367762,
      "grad_norm": 31.947071075439453,
      "learning_rate": 9.989068136093873e-05,
      "loss": 7.7758,
      "step": 14
    },
    {
      "epoch": 0.046948356807511735,
      "grad_norm": 32.472782135009766,
      "learning_rate": 9.98292246503335e-05,
      "loss": 7.4786,
      "step": 15
    },
    {
      "epoch": 0.050078247261345854,
      "grad_norm": 24.537351608276367,
      "learning_rate": 9.975414512725057e-05,
      "loss": 6.2569,
      "step": 16
    },
    {
      "epoch": 0.053208137715179966,
      "grad_norm": 33.11750793457031,
      "learning_rate": 9.966546331768191e-05,
      "loss": 7.5808,
      "step": 17
    },
    {
      "epoch": 0.056338028169014086,
      "grad_norm": 26.680910110473633,
      "learning_rate": 9.956320346634876e-05,
      "loss": 7.6116,
      "step": 18
    },
    {
      "epoch": 0.0594679186228482,
      "grad_norm": 25.24735450744629,
      "learning_rate": 9.944739353007344e-05,
      "loss": 7.8923,
      "step": 19
    },
    {
      "epoch": 0.06259780907668232,
      "grad_norm": 23.690746307373047,
      "learning_rate": 9.931806517013612e-05,
      "loss": 5.5552,
      "step": 20
    },
    {
      "epoch": 0.06572769953051644,
      "grad_norm": 24.43379783630371,
      "learning_rate": 9.917525374361912e-05,
      "loss": 5.7128,
      "step": 21
    },
    {
      "epoch": 0.06885758998435054,
      "grad_norm": 26.594310760498047,
      "learning_rate": 9.901899829374047e-05,
      "loss": 5.9577,
      "step": 22
    },
    {
      "epoch": 0.07198748043818466,
      "grad_norm": 26.899511337280273,
      "learning_rate": 9.884934153917997e-05,
      "loss": 5.7076,
      "step": 23
    },
    {
      "epoch": 0.07511737089201878,
      "grad_norm": 26.310029983520508,
      "learning_rate": 9.86663298624003e-05,
      "loss": 5.8776,
      "step": 24
    },
    {
      "epoch": 0.0782472613458529,
      "grad_norm": 20.84659194946289,
      "learning_rate": 9.847001329696653e-05,
      "loss": 6.1357,
      "step": 25
    },
    {
      "epoch": 0.081377151799687,
      "grad_norm": 22.88994789123535,
      "learning_rate": 9.826044551386744e-05,
      "loss": 6.6307,
      "step": 26
    },
    {
      "epoch": 0.08450704225352113,
      "grad_norm": 20.51585578918457,
      "learning_rate": 9.803768380684242e-05,
      "loss": 4.2293,
      "step": 27
    },
    {
      "epoch": 0.08763693270735524,
      "grad_norm": 25.414339065551758,
      "learning_rate": 9.780178907671789e-05,
      "loss": 6.9259,
      "step": 28
    },
    {
      "epoch": 0.09076682316118936,
      "grad_norm": 18.835140228271484,
      "learning_rate": 9.755282581475769e-05,
      "loss": 4.94,
      "step": 29
    },
    {
      "epoch": 0.09389671361502347,
      "grad_norm": 21.440311431884766,
      "learning_rate": 9.729086208503174e-05,
      "loss": 6.3151,
      "step": 30
    },
    {
      "epoch": 0.09702660406885759,
      "grad_norm": 26.966928482055664,
      "learning_rate": 9.701596950580806e-05,
      "loss": 7.0147,
      "step": 31
    },
    {
      "epoch": 0.10015649452269171,
      "grad_norm": 20.210208892822266,
      "learning_rate": 9.672822322997305e-05,
      "loss": 7.4799,
      "step": 32
    },
    {
      "epoch": 0.10328638497652583,
      "grad_norm": 17.627716064453125,
      "learning_rate": 9.642770192448536e-05,
      "loss": 5.6791,
      "step": 33
    },
    {
      "epoch": 0.10641627543035993,
      "grad_norm": 21.506210327148438,
      "learning_rate": 9.611448774886924e-05,
      "loss": 6.3717,
      "step": 34
    },
    {
      "epoch": 0.10954616588419405,
      "grad_norm": 24.187419891357422,
      "learning_rate": 9.578866633275288e-05,
      "loss": 5.8736,
      "step": 35
    },
    {
      "epoch": 0.11267605633802817,
      "grad_norm": 21.968637466430664,
      "learning_rate": 9.545032675245813e-05,
      "loss": 7.358,
      "step": 36
    },
    {
      "epoch": 0.11580594679186229,
      "grad_norm": 19.923280715942383,
      "learning_rate": 9.509956150664796e-05,
      "loss": 5.8589,
      "step": 37
    },
    {
      "epoch": 0.1189358372456964,
      "grad_norm": 18.508562088012695,
      "learning_rate": 9.473646649103818e-05,
      "loss": 5.0105,
      "step": 38
    },
    {
      "epoch": 0.12206572769953052,
      "grad_norm": 22.998958587646484,
      "learning_rate": 9.43611409721806e-05,
      "loss": 6.4225,
      "step": 39
    },
    {
      "epoch": 0.12519561815336464,
      "grad_norm": 25.26504898071289,
      "learning_rate": 9.397368756032445e-05,
      "loss": 5.3981,
      "step": 40
    },
    {
      "epoch": 0.12832550860719874,
      "grad_norm": 20.1059627532959,
      "learning_rate": 9.357421218136386e-05,
      "loss": 5.5859,
      "step": 41
    },
    {
      "epoch": 0.13145539906103287,
      "grad_norm": 16.89756965637207,
      "learning_rate": 9.316282404787871e-05,
      "loss": 4.4125,
      "step": 42
    },
    {
      "epoch": 0.13458528951486698,
      "grad_norm": 19.683380126953125,
      "learning_rate": 9.273963562927695e-05,
      "loss": 6.3446,
      "step": 43
    },
    {
      "epoch": 0.13771517996870108,
      "grad_norm": 20.562591552734375,
      "learning_rate": 9.230476262104677e-05,
      "loss": 5.902,
      "step": 44
    },
    {
      "epoch": 0.14084507042253522,
      "grad_norm": 18.05992889404297,
      "learning_rate": 9.185832391312644e-05,
      "loss": 6.2402,
      "step": 45
    },
    {
      "epoch": 0.14397496087636932,
      "grad_norm": 19.82953643798828,
      "learning_rate": 9.140044155740101e-05,
      "loss": 6.349,
      "step": 46
    },
    {
      "epoch": 0.14710485133020346,
      "grad_norm": 21.304370880126953,
      "learning_rate": 9.093124073433463e-05,
      "loss": 6.3377,
      "step": 47
    },
    {
      "epoch": 0.15023474178403756,
      "grad_norm": 19.656156539916992,
      "learning_rate": 9.045084971874738e-05,
      "loss": 6.2723,
      "step": 48
    },
    {
      "epoch": 0.15336463223787167,
      "grad_norm": 20.485694885253906,
      "learning_rate": 8.995939984474624e-05,
      "loss": 4.8062,
      "step": 49
    },
    {
      "epoch": 0.1564945226917058,
      "grad_norm": 21.565053939819336,
      "learning_rate": 8.945702546981969e-05,
      "loss": 7.1065,
      "step": 50
    },
    {
      "epoch": 0.1564945226917058,
      "eval_loss": 1.390590786933899,
      "eval_runtime": 50.8832,
      "eval_samples_per_second": 10.593,
      "eval_steps_per_second": 2.653,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.15499004493824e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}