{
  "best_metric": 0.12700584530830383,
  "best_model_checkpoint": "miner_id_24/checkpoint-500",
  "epoch": 0.5437737901033171,
  "eval_steps": 50,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001087547580206634,
      "eval_loss": 2.6140365600585938,
      "eval_runtime": 22.8682,
      "eval_samples_per_second": 16.967,
      "eval_steps_per_second": 4.242,
      "step": 1
    },
    {
      "epoch": 0.010875475802066341,
      "grad_norm": 1.15413498878479,
      "learning_rate": 4.2000000000000004e-05,
      "loss": 0.6819,
      "step": 10
    },
    {
      "epoch": 0.021750951604132682,
      "grad_norm": 1.6452891826629639,
      "learning_rate": 8.400000000000001e-05,
      "loss": 0.6379,
      "step": 20
    },
    {
      "epoch": 0.03262642740619902,
      "grad_norm": 2.625800848007202,
      "learning_rate": 0.000126,
      "loss": 0.4516,
      "step": 30
    },
    {
      "epoch": 0.043501903208265365,
      "grad_norm": 6.286452770233154,
      "learning_rate": 0.00016800000000000002,
      "loss": 0.5501,
      "step": 40
    },
    {
      "epoch": 0.054377379010331704,
      "grad_norm": 4.992573261260986,
      "learning_rate": 0.00021,
      "loss": 0.4475,
      "step": 50
    },
    {
      "epoch": 0.054377379010331704,
      "eval_loss": 0.6009775400161743,
      "eval_runtime": 22.7637,
      "eval_samples_per_second": 17.045,
      "eval_steps_per_second": 4.261,
      "step": 50
    },
    {
      "epoch": 0.06525285481239804,
      "grad_norm": 1.2332814931869507,
      "learning_rate": 0.00020974422527728155,
      "loss": 0.2856,
      "step": 60
    },
    {
      "epoch": 0.07612833061446438,
      "grad_norm": 0.9268720149993896,
      "learning_rate": 0.0002089781472178649,
      "loss": 0.2699,
      "step": 70
    },
    {
      "epoch": 0.08700380641653073,
      "grad_norm": 1.5051002502441406,
      "learning_rate": 0.0002077054980770496,
      "loss": 0.2242,
      "step": 80
    },
    {
      "epoch": 0.09787928221859707,
      "grad_norm": 4.559469223022461,
      "learning_rate": 0.00020593247807352348,
      "loss": 0.2854,
      "step": 90
    },
    {
      "epoch": 0.10875475802066341,
      "grad_norm": 2.309359073638916,
      "learning_rate": 0.00020366772518252038,
      "loss": 0.4822,
      "step": 100
    },
    {
      "epoch": 0.10875475802066341,
      "eval_loss": 0.6521262526512146,
      "eval_runtime": 22.8192,
      "eval_samples_per_second": 17.003,
      "eval_steps_per_second": 4.251,
      "step": 100
    },
    {
      "epoch": 0.11963023382272975,
      "grad_norm": 0.9215604662895203,
      "learning_rate": 0.0002009222730524731,
      "loss": 0.2915,
      "step": 110
    },
    {
      "epoch": 0.13050570962479607,
      "grad_norm": 0.9864150285720825,
      "learning_rate": 0.00019770949725018733,
      "loss": 0.2042,
      "step": 120
    },
    {
      "epoch": 0.14138118542686243,
      "grad_norm": 1.3311983346939087,
      "learning_rate": 0.00019404505009642473,
      "loss": 0.1785,
      "step": 130
    },
    {
      "epoch": 0.15225666122892875,
      "grad_norm": 2.455376386642456,
      "learning_rate": 0.0001899467844093695,
      "loss": 0.226,
      "step": 140
    },
    {
      "epoch": 0.1631321370309951,
      "grad_norm": 5.563918113708496,
      "learning_rate": 0.00018543466652749268,
      "loss": 0.2853,
      "step": 150
    },
    {
      "epoch": 0.1631321370309951,
      "eval_loss": 0.39147135615348816,
      "eval_runtime": 22.7958,
      "eval_samples_per_second": 17.021,
      "eval_steps_per_second": 4.255,
      "step": 150
    },
    {
      "epoch": 0.17400761283306146,
      "grad_norm": 0.6848308444023132,
      "learning_rate": 0.00018053067903555837,
      "loss": 0.2406,
      "step": 160
    },
    {
      "epoch": 0.18488308863512778,
      "grad_norm": 1.072629451751709,
      "learning_rate": 0.00017525871366768012,
      "loss": 0.1456,
      "step": 170
    },
    {
      "epoch": 0.19575856443719414,
      "grad_norm": 1.8140205144882202,
      "learning_rate": 0.00016964445490919413,
      "loss": 0.1917,
      "step": 180
    },
    {
      "epoch": 0.20663404023926046,
      "grad_norm": 2.534623861312866,
      "learning_rate": 0.00016371525486442843,
      "loss": 0.2614,
      "step": 190
    },
    {
      "epoch": 0.21750951604132682,
      "grad_norm": 3.094886302947998,
      "learning_rate": 0.0001575,
      "loss": 0.2873,
      "step": 200
    },
    {
      "epoch": 0.21750951604132682,
      "eval_loss": 0.3247433006763458,
      "eval_runtime": 22.7847,
      "eval_samples_per_second": 17.029,
      "eval_steps_per_second": 4.257,
      "step": 200
    },
    {
      "epoch": 0.22838499184339314,
      "grad_norm": 0.5655252933502197,
      "learning_rate": 0.00015102897041285315,
      "loss": 0.2144,
      "step": 210
    },
    {
      "epoch": 0.2392604676454595,
      "grad_norm": 0.7886796593666077,
      "learning_rate": 0.00014433369230867077,
      "loss": 0.1604,
      "step": 220
    },
    {
      "epoch": 0.2501359434475258,
      "grad_norm": 1.0239461660385132,
      "learning_rate": 0.0001374467844093695,
      "loss": 0.1419,
      "step": 230
    },
    {
      "epoch": 0.26101141924959215,
      "grad_norm": 4.1843976974487305,
      "learning_rate": 0.0001304017990379651,
      "loss": 0.2756,
      "step": 240
    },
    {
      "epoch": 0.27188689505165853,
      "grad_norm": 9.119248390197754,
      "learning_rate": 0.0001232330586550277,
      "loss": 0.2422,
      "step": 250
    },
    {
      "epoch": 0.27188689505165853,
      "eval_loss": 0.259118914604187,
      "eval_runtime": 22.8194,
      "eval_samples_per_second": 17.003,
      "eval_steps_per_second": 4.251,
      "step": 250
    },
    {
      "epoch": 0.28276237085372485,
      "grad_norm": 0.4750421941280365,
      "learning_rate": 0.00011597548864310363,
      "loss": 0.1849,
      "step": 260
    },
    {
      "epoch": 0.2936378466557912,
      "grad_norm": 0.6818949580192566,
      "learning_rate": 0.00010866444715376263,
      "loss": 0.1809,
      "step": 270
    },
    {
      "epoch": 0.3045133224578575,
      "grad_norm": 0.9162408113479614,
      "learning_rate": 0.00010133555284623744,
      "loss": 0.0971,
      "step": 280
    },
    {
      "epoch": 0.3153887982599239,
      "grad_norm": 1.3977360725402832,
      "learning_rate": 9.402451135689641e-05,
      "loss": 0.2537,
      "step": 290
    },
    {
      "epoch": 0.3262642740619902,
      "grad_norm": 3.654791831970215,
      "learning_rate": 8.676694134497232e-05,
      "loss": 0.3683,
      "step": 300
    },
    {
      "epoch": 0.3262642740619902,
      "eval_loss": 0.22318749129772186,
      "eval_runtime": 22.8297,
      "eval_samples_per_second": 16.995,
      "eval_steps_per_second": 4.249,
      "step": 300
    },
    {
      "epoch": 0.33713974986405654,
      "grad_norm": 0.3159692585468292,
      "learning_rate": 7.95982009620349e-05,
      "loss": 0.1784,
      "step": 310
    },
    {
      "epoch": 0.3480152256661229,
      "grad_norm": 0.8129618763923645,
      "learning_rate": 7.255321559063053e-05,
      "loss": 0.1069,
      "step": 320
    },
    {
      "epoch": 0.35889070146818924,
      "grad_norm": 1.2382850646972656,
      "learning_rate": 6.566630769132923e-05,
      "loss": 0.13,
      "step": 330
    },
    {
      "epoch": 0.36976617727025557,
      "grad_norm": 2.3374767303466797,
      "learning_rate": 5.897102958714686e-05,
      "loss": 0.2035,
      "step": 340
    },
    {
      "epoch": 0.3806416530723219,
      "grad_norm": 2.183784008026123,
      "learning_rate": 5.250000000000002e-05,
      "loss": 0.2272,
      "step": 350
    },
    {
      "epoch": 0.3806416530723219,
      "eval_loss": 0.16593347489833832,
      "eval_runtime": 22.788,
      "eval_samples_per_second": 17.026,
      "eval_steps_per_second": 4.257,
      "step": 350
    },
    {
      "epoch": 0.3915171288743883,
      "grad_norm": 0.5802284479141235,
      "learning_rate": 4.62847451355716e-05,
      "loss": 0.1449,
      "step": 360
    },
    {
      "epoch": 0.4023926046764546,
      "grad_norm": 2.2887635231018066,
      "learning_rate": 4.035554509080588e-05,
      "loss": 0.1412,
      "step": 370
    },
    {
      "epoch": 0.4132680804785209,
      "grad_norm": 1.0761778354644775,
      "learning_rate": 3.474128633231992e-05,
      "loss": 0.1024,
      "step": 380
    },
    {
      "epoch": 0.42414355628058725,
      "grad_norm": 1.304354190826416,
      "learning_rate": 2.946932096444165e-05,
      "loss": 0.1266,
      "step": 390
    },
    {
      "epoch": 0.43501903208265363,
      "grad_norm": 2.448615789413452,
      "learning_rate": 2.456533347250732e-05,
      "loss": 0.1724,
      "step": 400
    },
    {
      "epoch": 0.43501903208265363,
      "eval_loss": 0.1496591418981552,
      "eval_runtime": 22.6015,
      "eval_samples_per_second": 17.167,
      "eval_steps_per_second": 4.292,
      "step": 400
    },
    {
      "epoch": 0.44589450788471996,
      "grad_norm": 0.5191034078598022,
      "learning_rate": 2.005321559063053e-05,
      "loss": 0.1327,
      "step": 410
    },
    {
      "epoch": 0.4567699836867863,
      "grad_norm": 0.6500219106674194,
      "learning_rate": 1.5954949903575276e-05,
      "loss": 0.1289,
      "step": 420
    },
    {
      "epoch": 0.4676454594888526,
      "grad_norm": 0.7300875186920166,
      "learning_rate": 1.2290502749812666e-05,
      "loss": 0.0955,
      "step": 430
    },
    {
      "epoch": 0.478520935290919,
      "grad_norm": 0.7386441230773926,
      "learning_rate": 9.077726947526898e-06,
      "loss": 0.1079,
      "step": 440
    },
    {
      "epoch": 0.4893964110929853,
      "grad_norm": 1.8114835023880005,
      "learning_rate": 6.332274817479627e-06,
      "loss": 0.1624,
      "step": 450
    },
    {
      "epoch": 0.4893964110929853,
      "eval_loss": 0.1295737773180008,
      "eval_runtime": 22.5929,
      "eval_samples_per_second": 17.174,
      "eval_steps_per_second": 4.293,
      "step": 450
    },
    {
      "epoch": 0.5002718868950516,
      "grad_norm": 0.49667298793792725,
      "learning_rate": 4.067521926476516e-06,
      "loss": 0.1261,
      "step": 460
    },
    {
      "epoch": 0.511147362697118,
      "grad_norm": 0.6079006195068359,
      "learning_rate": 2.294501922950403e-06,
      "loss": 0.0965,
      "step": 470
    },
    {
      "epoch": 0.5220228384991843,
      "grad_norm": 1.0314648151397705,
      "learning_rate": 1.021852782135112e-06,
      "loss": 0.1093,
      "step": 480
    },
    {
      "epoch": 0.5328983143012507,
      "grad_norm": 0.8024740815162659,
      "learning_rate": 2.5577472271845927e-07,
      "loss": 0.1305,
      "step": 490
    },
    {
      "epoch": 0.5437737901033171,
      "grad_norm": 1.7846750020980835,
      "learning_rate": 0.0,
      "loss": 0.1485,
      "step": 500
    },
    {
      "epoch": 0.5437737901033171,
      "eval_loss": 0.12700584530830383,
      "eval_runtime": 22.7807,
      "eval_samples_per_second": 17.032,
      "eval_steps_per_second": 4.258,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 9.002332739076096e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}