{ "best_metric": null, "best_model_checkpoint": null, "epoch": 6.0, "eval_steps": 500, "global_step": 31710, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0946073793755913, "grad_norm": 26.54265022277832, "learning_rate": 2.9526963103122042e-05, "loss": 1.3126, "step": 500 }, { "epoch": 0.1892147587511826, "grad_norm": 16.264204025268555, "learning_rate": 2.905392620624409e-05, "loss": 0.9908, "step": 1000 }, { "epoch": 0.28382213812677387, "grad_norm": 15.488258361816406, "learning_rate": 2.8580889309366132e-05, "loss": 0.924, "step": 1500 }, { "epoch": 0.3784295175023652, "grad_norm": 17.02219581604004, "learning_rate": 2.8107852412488173e-05, "loss": 0.8811, "step": 2000 }, { "epoch": 0.47303689687795647, "grad_norm": 20.8419132232666, "learning_rate": 2.763481551561022e-05, "loss": 0.8707, "step": 2500 }, { "epoch": 0.5676442762535477, "grad_norm": 14.816078186035156, "learning_rate": 2.7161778618732263e-05, "loss": 0.8418, "step": 3000 }, { "epoch": 0.6622516556291391, "grad_norm": 16.095823287963867, "learning_rate": 2.6688741721854305e-05, "loss": 0.8021, "step": 3500 }, { "epoch": 0.7568590350047304, "grad_norm": 16.451648712158203, "learning_rate": 2.6215704824976346e-05, "loss": 0.8166, "step": 4000 }, { "epoch": 0.8514664143803217, "grad_norm": 23.100683212280273, "learning_rate": 2.5742667928098394e-05, "loss": 0.7894, "step": 4500 }, { "epoch": 0.9460737937559129, "grad_norm": 16.882429122924805, "learning_rate": 2.5269631031220436e-05, "loss": 0.7754, "step": 5000 }, { "epoch": 1.0406811731315042, "grad_norm": 14.48630142211914, "learning_rate": 2.4796594134342477e-05, "loss": 0.7216, "step": 5500 }, { "epoch": 1.1352885525070955, "grad_norm": 16.525306701660156, "learning_rate": 2.4323557237464526e-05, "loss": 0.6325, "step": 6000 }, { "epoch": 1.2298959318826868, "grad_norm": 19.755531311035156, "learning_rate": 2.3850520340586567e-05, "loss": 0.6503, "step": 6500 }, { "epoch": 1.3245033112582782, "grad_norm": 9.933408737182617, "learning_rate": 2.337748344370861e-05, "loss": 0.6369, "step": 7000 }, { "epoch": 1.4191106906338695, "grad_norm": 21.490463256835938, "learning_rate": 2.2904446546830654e-05, "loss": 0.6411, "step": 7500 }, { "epoch": 1.5137180700094608, "grad_norm": 16.791980743408203, "learning_rate": 2.24314096499527e-05, "loss": 0.6319, "step": 8000 }, { "epoch": 1.608325449385052, "grad_norm": 14.076953887939453, "learning_rate": 2.195837275307474e-05, "loss": 0.6282, "step": 8500 }, { "epoch": 1.7029328287606433, "grad_norm": 23.48201560974121, "learning_rate": 2.148533585619678e-05, "loss": 0.6325, "step": 9000 }, { "epoch": 1.7975402081362346, "grad_norm": 32.473262786865234, "learning_rate": 2.101229895931883e-05, "loss": 0.6411, "step": 9500 }, { "epoch": 1.8921475875118259, "grad_norm": 17.878799438476562, "learning_rate": 2.053926206244087e-05, "loss": 0.6314, "step": 10000 }, { "epoch": 1.9867549668874172, "grad_norm": 11.421870231628418, "learning_rate": 2.0066225165562913e-05, "loss": 0.6474, "step": 10500 }, { "epoch": 2.0813623462630084, "grad_norm": 23.016128540039062, "learning_rate": 1.959318826868496e-05, "loss": 0.4893, "step": 11000 }, { "epoch": 2.1759697256385997, "grad_norm": 14.312499046325684, "learning_rate": 1.9120151371807002e-05, "loss": 0.4706, "step": 11500 }, { "epoch": 2.270577105014191, "grad_norm": 16.817712783813477, "learning_rate": 1.8647114474929044e-05, "loss": 0.4662, "step": 12000 }, { "epoch": 2.3651844843897822, "grad_norm": 
25.88548469543457, "learning_rate": 1.8174077578051085e-05, "loss": 0.4881, "step": 12500 }, { "epoch": 2.4597918637653735, "grad_norm": 12.041430473327637, "learning_rate": 1.7701040681173134e-05, "loss": 0.4904, "step": 13000 }, { "epoch": 2.5543992431409652, "grad_norm": 14.665155410766602, "learning_rate": 1.7228003784295175e-05, "loss": 0.4981, "step": 13500 }, { "epoch": 2.6490066225165565, "grad_norm": 29.060611724853516, "learning_rate": 1.6754966887417217e-05, "loss": 0.4832, "step": 14000 }, { "epoch": 2.7436140018921478, "grad_norm": 20.540754318237305, "learning_rate": 1.6281929990539265e-05, "loss": 0.4713, "step": 14500 }, { "epoch": 2.838221381267739, "grad_norm": 24.385042190551758, "learning_rate": 1.5808893093661306e-05, "loss": 0.4775, "step": 15000 }, { "epoch": 2.9328287606433303, "grad_norm": 12.536005020141602, "learning_rate": 1.5335856196783348e-05, "loss": 0.4776, "step": 15500 }, { "epoch": 3.0274361400189216, "grad_norm": 14.910382270812988, "learning_rate": 1.4862819299905393e-05, "loss": 0.4355, "step": 16000 }, { "epoch": 3.122043519394513, "grad_norm": 14.342169761657715, "learning_rate": 1.4389782403027438e-05, "loss": 0.3343, "step": 16500 }, { "epoch": 3.216650898770104, "grad_norm": 16.1813907623291, "learning_rate": 1.3916745506149479e-05, "loss": 0.3452, "step": 17000 }, { "epoch": 3.3112582781456954, "grad_norm": 9.025822639465332, "learning_rate": 1.3443708609271524e-05, "loss": 0.3522, "step": 17500 }, { "epoch": 3.4058656575212867, "grad_norm": 25.14023780822754, "learning_rate": 1.2970671712393567e-05, "loss": 0.3408, "step": 18000 }, { "epoch": 3.500473036896878, "grad_norm": 29.443084716796875, "learning_rate": 1.249763481551561e-05, "loss": 0.3414, "step": 18500 }, { "epoch": 3.595080416272469, "grad_norm": 30.374174118041992, "learning_rate": 1.2024597918637655e-05, "loss": 0.3511, "step": 19000 }, { "epoch": 3.6896877956480605, "grad_norm": 19.67069435119629, "learning_rate": 1.1551561021759697e-05, "loss": 0.3505, "step": 19500 }, { "epoch": 3.7842951750236518, "grad_norm": 14.601593971252441, "learning_rate": 1.1078524124881742e-05, "loss": 0.3415, "step": 20000 }, { "epoch": 3.878902554399243, "grad_norm": 19.330163955688477, "learning_rate": 1.0605487228003785e-05, "loss": 0.3509, "step": 20500 }, { "epoch": 3.9735099337748343, "grad_norm": 19.882015228271484, "learning_rate": 1.0132450331125828e-05, "loss": 0.3486, "step": 21000 }, { "epoch": 4.068117313150426, "grad_norm": 8.843262672424316, "learning_rate": 9.659413434247873e-06, "loss": 0.259, "step": 21500 }, { "epoch": 4.162724692526017, "grad_norm": 38.07181930541992, "learning_rate": 9.186376537369914e-06, "loss": 0.2325, "step": 22000 }, { "epoch": 4.257332071901608, "grad_norm": 16.061630249023438, "learning_rate": 8.713339640491959e-06, "loss": 0.2346, "step": 22500 }, { "epoch": 4.351939451277199, "grad_norm": 14.632463455200195, "learning_rate": 8.240302743614002e-06, "loss": 0.2365, "step": 23000 }, { "epoch": 4.446546830652791, "grad_norm": 9.404703140258789, "learning_rate": 7.767265846736046e-06, "loss": 0.2347, "step": 23500 }, { "epoch": 4.541154210028382, "grad_norm": 6.847620964050293, "learning_rate": 7.2942289498580895e-06, "loss": 0.2326, "step": 24000 }, { "epoch": 4.635761589403973, "grad_norm": 14.490519523620605, "learning_rate": 6.821192052980133e-06, "loss": 0.2321, "step": 24500 }, { "epoch": 4.7303689687795645, "grad_norm": 27.82660675048828, "learning_rate": 6.348155156102176e-06, "loss": 0.2459, "step": 25000 }, { "epoch": 4.824976348155156, "grad_norm": 
22.924592971801758, "learning_rate": 5.87511825922422e-06, "loss": 0.2418, "step": 25500 }, { "epoch": 4.919583727530747, "grad_norm": 22.808650970458984, "learning_rate": 5.402081362346263e-06, "loss": 0.2272, "step": 26000 }, { "epoch": 5.014191106906338, "grad_norm": 8.198173522949219, "learning_rate": 4.929044465468307e-06, "loss": 0.2201, "step": 26500 }, { "epoch": 5.10879848628193, "grad_norm": 20.825037002563477, "learning_rate": 4.45600756859035e-06, "loss": 0.1464, "step": 27000 }, { "epoch": 5.203405865657521, "grad_norm": 21.507328033447266, "learning_rate": 3.9829706717123935e-06, "loss": 0.1451, "step": 27500 }, { "epoch": 5.298013245033113, "grad_norm": 11.167850494384766, "learning_rate": 3.509933774834437e-06, "loss": 0.1417, "step": 28000 }, { "epoch": 5.392620624408704, "grad_norm": 20.28238868713379, "learning_rate": 3.0368968779564807e-06, "loss": 0.1527, "step": 28500 }, { "epoch": 5.4872280037842955, "grad_norm": 13.64089584350586, "learning_rate": 2.5638599810785243e-06, "loss": 0.148, "step": 29000 }, { "epoch": 5.581835383159887, "grad_norm": 16.22063446044922, "learning_rate": 2.0908230842005675e-06, "loss": 0.1446, "step": 29500 }, { "epoch": 5.676442762535478, "grad_norm": 25.130605697631836, "learning_rate": 1.6177861873226113e-06, "loss": 0.1401, "step": 30000 }, { "epoch": 5.771050141911069, "grad_norm": 8.35947036743164, "learning_rate": 1.1447492904446547e-06, "loss": 0.1488, "step": 30500 }, { "epoch": 5.865657521286661, "grad_norm": 15.456842422485352, "learning_rate": 6.717123935666982e-07, "loss": 0.1388, "step": 31000 }, { "epoch": 5.960264900662252, "grad_norm": 25.6274356842041, "learning_rate": 1.9867549668874173e-07, "loss": 0.1381, "step": 31500 }, { "epoch": 6.0, "step": 31710, "total_flos": 1.6805564390366208e+16, "train_loss": 0.4557977186772621, "train_runtime": 8236.6278, "train_samples_per_second": 123.188, "train_steps_per_second": 3.85 } ], "logging_steps": 500, "max_steps": 31710, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.6805564390366208e+16, "train_batch_size": 32, "trial_name": null, "trial_params": null }