{
  "best_metric": 0.006671774201095104,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.08064516129032258,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0016129032258064516,
      "grad_norm": 0.6049064993858337,
      "learning_rate": 1e-05,
      "loss": 0.9039,
      "step": 1
    },
    {
      "epoch": 0.0016129032258064516,
      "eval_loss": 0.7379059195518494,
      "eval_runtime": 106.5243,
      "eval_samples_per_second": 9.801,
      "eval_steps_per_second": 2.45,
      "step": 1
    },
    {
      "epoch": 0.0032258064516129032,
      "grad_norm": 0.7415255904197693,
      "learning_rate": 2e-05,
      "loss": 1.1299,
      "step": 2
    },
    {
      "epoch": 0.004838709677419355,
      "grad_norm": 0.8382638692855835,
      "learning_rate": 3e-05,
      "loss": 1.2151,
      "step": 3
    },
    {
      "epoch": 0.0064516129032258064,
      "grad_norm": 0.862156331539154,
      "learning_rate": 4e-05,
      "loss": 1.2969,
      "step": 4
    },
    {
      "epoch": 0.008064516129032258,
      "grad_norm": 0.9778095483779907,
      "learning_rate": 5e-05,
      "loss": 1.2646,
      "step": 5
    },
    {
      "epoch": 0.00967741935483871,
      "grad_norm": 0.9458996057510376,
      "learning_rate": 6e-05,
      "loss": 1.2015,
      "step": 6
    },
    {
      "epoch": 0.01129032258064516,
      "grad_norm": 0.981987476348877,
      "learning_rate": 7e-05,
      "loss": 1.0207,
      "step": 7
    },
    {
      "epoch": 0.012903225806451613,
      "grad_norm": 0.8597192764282227,
      "learning_rate": 8e-05,
      "loss": 0.7901,
      "step": 8
    },
    {
      "epoch": 0.014516129032258065,
      "grad_norm": 0.9466065764427185,
      "learning_rate": 9e-05,
      "loss": 0.7297,
      "step": 9
    },
    {
      "epoch": 0.016129032258064516,
      "grad_norm": 1.1080763339996338,
      "learning_rate": 0.0001,
      "loss": 0.4713,
      "step": 10
    },
    {
      "epoch": 0.017741935483870968,
      "grad_norm": 0.8401578068733215,
      "learning_rate": 9.999316524962345e-05,
      "loss": 0.2136,
      "step": 11
    },
    {
      "epoch": 0.01935483870967742,
      "grad_norm": 0.9712587594985962,
      "learning_rate": 9.997266286704631e-05,
      "loss": 0.2193,
      "step": 12
    },
    {
      "epoch": 0.020967741935483872,
      "grad_norm": 1.2566802501678467,
      "learning_rate": 9.993849845741524e-05,
      "loss": 0.2793,
      "step": 13
    },
    {
      "epoch": 0.02258064516129032,
      "grad_norm": 0.9939321875572205,
      "learning_rate": 9.989068136093873e-05,
      "loss": 0.1432,
      "step": 14
    },
    {
      "epoch": 0.024193548387096774,
      "grad_norm": 1.6880418062210083,
      "learning_rate": 9.98292246503335e-05,
      "loss": 0.1768,
      "step": 15
    },
    {
      "epoch": 0.025806451612903226,
      "grad_norm": 1.028950810432434,
      "learning_rate": 9.975414512725057e-05,
      "loss": 0.1185,
      "step": 16
    },
    {
      "epoch": 0.027419354838709678,
      "grad_norm": 0.7586027383804321,
      "learning_rate": 9.966546331768191e-05,
      "loss": 0.0659,
      "step": 17
    },
    {
      "epoch": 0.02903225806451613,
      "grad_norm": 0.9536590576171875,
      "learning_rate": 9.956320346634876e-05,
      "loss": 0.072,
      "step": 18
    },
    {
      "epoch": 0.03064516129032258,
      "grad_norm": 0.7935003638267517,
      "learning_rate": 9.944739353007344e-05,
      "loss": 0.0533,
      "step": 19
    },
    {
      "epoch": 0.03225806451612903,
      "grad_norm": 0.9564946293830872,
      "learning_rate": 9.931806517013612e-05,
      "loss": 0.0539,
      "step": 20
    },
    {
      "epoch": 0.03387096774193549,
      "grad_norm": 0.6796997785568237,
      "learning_rate": 9.917525374361912e-05,
      "loss": 0.0269,
      "step": 21
    },
    {
      "epoch": 0.035483870967741936,
      "grad_norm": 1.2710872888565063,
      "learning_rate": 9.901899829374047e-05,
      "loss": 0.1013,
      "step": 22
    },
    {
      "epoch": 0.037096774193548385,
      "grad_norm": 0.5349439382553101,
      "learning_rate": 9.884934153917997e-05,
      "loss": 0.0248,
      "step": 23
    },
    {
      "epoch": 0.03870967741935484,
      "grad_norm": 1.372443437576294,
      "learning_rate": 9.86663298624003e-05,
      "loss": 0.1846,
      "step": 24
    },
    {
      "epoch": 0.04032258064516129,
      "grad_norm": 0.7447899580001831,
      "learning_rate": 9.847001329696653e-05,
      "loss": 0.0277,
      "step": 25
    },
    {
      "epoch": 0.041935483870967745,
      "grad_norm": 0.2933860719203949,
      "learning_rate": 9.826044551386744e-05,
      "loss": 0.0103,
      "step": 26
    },
    {
      "epoch": 0.043548387096774194,
      "grad_norm": 1.2192440032958984,
      "learning_rate": 9.803768380684242e-05,
      "loss": 0.0694,
      "step": 27
    },
    {
      "epoch": 0.04516129032258064,
      "grad_norm": 1.3143314123153687,
      "learning_rate": 9.780178907671789e-05,
      "loss": 0.0395,
      "step": 28
    },
    {
      "epoch": 0.0467741935483871,
      "grad_norm": 0.8509743213653564,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.1224,
      "step": 29
    },
    {
      "epoch": 0.04838709677419355,
      "grad_norm": 0.9023564457893372,
      "learning_rate": 9.729086208503174e-05,
      "loss": 0.0558,
      "step": 30
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.3841659128665924,
      "learning_rate": 9.701596950580806e-05,
      "loss": 0.0202,
      "step": 31
    },
    {
      "epoch": 0.05161290322580645,
      "grad_norm": 2.0831139087677,
      "learning_rate": 9.672822322997305e-05,
      "loss": 0.0486,
      "step": 32
    },
    {
      "epoch": 0.0532258064516129,
      "grad_norm": 0.3329290747642517,
      "learning_rate": 9.642770192448536e-05,
      "loss": 0.0155,
      "step": 33
    },
    {
      "epoch": 0.054838709677419356,
      "grad_norm": 0.3646816313266754,
      "learning_rate": 9.611448774886924e-05,
      "loss": 0.0219,
      "step": 34
    },
    {
      "epoch": 0.056451612903225805,
      "grad_norm": 0.5079269409179688,
      "learning_rate": 9.578866633275288e-05,
      "loss": 0.0392,
      "step": 35
    },
    {
      "epoch": 0.05806451612903226,
      "grad_norm": 1.1356701850891113,
      "learning_rate": 9.545032675245813e-05,
      "loss": 0.0314,
      "step": 36
    },
    {
      "epoch": 0.05967741935483871,
      "grad_norm": 0.7165648341178894,
      "learning_rate": 9.509956150664796e-05,
      "loss": 0.0622,
      "step": 37
    },
    {
      "epoch": 0.06129032258064516,
      "grad_norm": 0.9975515604019165,
      "learning_rate": 9.473646649103818e-05,
      "loss": 0.0386,
      "step": 38
    },
    {
      "epoch": 0.06290322580645161,
      "grad_norm": 0.5520864725112915,
      "learning_rate": 9.43611409721806e-05,
      "loss": 0.0421,
      "step": 39
    },
    {
      "epoch": 0.06451612903225806,
      "grad_norm": 0.6942296028137207,
      "learning_rate": 9.397368756032445e-05,
      "loss": 0.0219,
      "step": 40
    },
    {
      "epoch": 0.06612903225806452,
      "grad_norm": 1.3481652736663818,
      "learning_rate": 9.357421218136386e-05,
      "loss": 0.143,
      "step": 41
    },
    {
      "epoch": 0.06774193548387097,
      "grad_norm": 2.7365214824676514,
      "learning_rate": 9.316282404787871e-05,
      "loss": 0.0288,
      "step": 42
    },
    {
      "epoch": 0.06935483870967742,
      "grad_norm": 0.2986465394496918,
      "learning_rate": 9.273963562927695e-05,
      "loss": 0.0081,
      "step": 43
    },
    {
      "epoch": 0.07096774193548387,
      "grad_norm": 0.42408058047294617,
      "learning_rate": 9.230476262104677e-05,
      "loss": 0.013,
      "step": 44
    },
    {
      "epoch": 0.07258064516129033,
      "grad_norm": 1.1357313394546509,
      "learning_rate": 9.185832391312644e-05,
      "loss": 0.0186,
      "step": 45
    },
    {
      "epoch": 0.07419354838709677,
      "grad_norm": 0.9855334162712097,
      "learning_rate": 9.140044155740101e-05,
      "loss": 0.0275,
      "step": 46
    },
    {
      "epoch": 0.07580645161290323,
      "grad_norm": 0.14201870560646057,
      "learning_rate": 9.093124073433463e-05,
      "loss": 0.0036,
      "step": 47
    },
    {
      "epoch": 0.07741935483870968,
      "grad_norm": 1.7319813966751099,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.0958,
      "step": 48
    },
    {
      "epoch": 0.07903225806451612,
      "grad_norm": 1.869276523590088,
      "learning_rate": 8.995939984474624e-05,
      "loss": 0.0504,
      "step": 49
    },
    {
      "epoch": 0.08064516129032258,
      "grad_norm": 1.4869447946548462,
      "learning_rate": 8.945702546981969e-05,
      "loss": 0.0542,
      "step": 50
    },
    {
      "epoch": 0.08064516129032258,
      "eval_loss": 0.006671774201095104,
      "eval_runtime": 107.2911,
      "eval_samples_per_second": 9.731,
      "eval_steps_per_second": 2.433,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.27296891518976e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|