{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 1400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.014285714285714285,
      "grad_norm": 2.207634687423706,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 2.5979,
      "step": 20
    },
    {
      "epoch": 0.02857142857142857,
      "grad_norm": 1.5117173194885254,
      "learning_rate": 2.857142857142857e-05,
      "loss": 2.3131,
      "step": 40
    },
    {
      "epoch": 0.04285714285714286,
      "grad_norm": 1.7237502336502075,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 1.7843,
      "step": 60
    },
    {
      "epoch": 0.05714285714285714,
      "grad_norm": 1.895707130432129,
      "learning_rate": 5.714285714285714e-05,
      "loss": 1.2737,
      "step": 80
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 1.7597640752792358,
      "learning_rate": 7.142857142857143e-05,
      "loss": 1.0504,
      "step": 100
    },
    {
      "epoch": 0.08571428571428572,
      "grad_norm": 1.589367151260376,
      "learning_rate": 8.571428571428571e-05,
      "loss": 0.9498,
      "step": 120
    },
    {
      "epoch": 0.1,
      "grad_norm": 1.5316022634506226,
      "learning_rate": 0.0001,
      "loss": 0.8856,
      "step": 140
    },
    {
      "epoch": 0.11428571428571428,
      "grad_norm": 1.638037919998169,
      "learning_rate": 9.841269841269841e-05,
      "loss": 0.8257,
      "step": 160
    },
    {
      "epoch": 0.12857142857142856,
      "grad_norm": 1.3244397640228271,
      "learning_rate": 9.682539682539682e-05,
      "loss": 0.7503,
      "step": 180
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 1.434253215789795,
      "learning_rate": 9.523809523809524e-05,
      "loss": 0.7035,
      "step": 200
    },
    {
      "epoch": 0.15714285714285714,
      "grad_norm": 1.5084493160247803,
      "learning_rate": 9.365079365079366e-05,
      "loss": 0.6422,
      "step": 220
    },
    {
      "epoch": 0.17142857142857143,
      "grad_norm": 1.704795002937317,
      "learning_rate": 9.206349206349206e-05,
      "loss": 0.6382,
      "step": 240
    },
    {
      "epoch": 0.18571428571428572,
      "grad_norm": 1.784338116645813,
      "learning_rate": 9.047619047619048e-05,
      "loss": 0.6329,
      "step": 260
    },
    {
      "epoch": 0.2,
      "grad_norm": 1.8456165790557861,
      "learning_rate": 8.888888888888889e-05,
      "loss": 0.6412,
      "step": 280
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 1.192288875579834,
      "learning_rate": 8.730158730158731e-05,
      "loss": 0.591,
      "step": 300
    },
    {
      "epoch": 0.22857142857142856,
      "grad_norm": 1.7664241790771484,
      "learning_rate": 8.571428571428571e-05,
      "loss": 0.5889,
      "step": 320
    },
    {
      "epoch": 0.24285714285714285,
      "grad_norm": 1.4029046297073364,
      "learning_rate": 8.412698412698413e-05,
      "loss": 0.5976,
      "step": 340
    },
    {
      "epoch": 0.2571428571428571,
      "grad_norm": 1.2247138023376465,
      "learning_rate": 8.253968253968255e-05,
      "loss": 0.6098,
      "step": 360
    },
    {
      "epoch": 0.2714285714285714,
      "grad_norm": 1.9247369766235352,
      "learning_rate": 8.095238095238096e-05,
      "loss": 0.6056,
      "step": 380
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 1.2571682929992676,
      "learning_rate": 7.936507936507937e-05,
      "loss": 0.5946,
      "step": 400
    },
    {
      "epoch": 0.3,
      "grad_norm": 1.4346907138824463,
      "learning_rate": 7.777777777777778e-05,
      "loss": 0.5899,
      "step": 420
    },
    {
      "epoch": 0.3142857142857143,
      "grad_norm": 1.5911189317703247,
      "learning_rate": 7.619047619047618e-05,
      "loss": 0.5629,
      "step": 440
    },
    {
      "epoch": 0.32857142857142857,
      "grad_norm": 1.4303562641143799,
      "learning_rate": 7.460317460317461e-05,
      "loss": 0.6007,
      "step": 460
    },
    {
      "epoch": 0.34285714285714286,
      "grad_norm": 1.5196324586868286,
      "learning_rate": 7.301587301587302e-05,
      "loss": 0.5917,
      "step": 480
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 1.4185888767242432,
      "learning_rate": 7.142857142857143e-05,
      "loss": 0.5666,
      "step": 500
    },
    {
      "epoch": 0.37142857142857144,
      "grad_norm": 1.416913390159607,
      "learning_rate": 6.984126984126984e-05,
      "loss": 0.5468,
      "step": 520
    },
    {
      "epoch": 0.38571428571428573,
      "grad_norm": 1.3123226165771484,
      "learning_rate": 6.825396825396825e-05,
      "loss": 0.5381,
      "step": 540
    },
    {
      "epoch": 0.4,
      "grad_norm": 1.4564621448516846,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.5255,
      "step": 560
    },
    {
      "epoch": 0.4142857142857143,
      "grad_norm": 1.3417844772338867,
      "learning_rate": 6.507936507936509e-05,
      "loss": 0.5563,
      "step": 580
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 1.168533205986023,
      "learning_rate": 6.349206349206349e-05,
      "loss": 0.5624,
      "step": 600
    },
    {
      "epoch": 0.44285714285714284,
      "grad_norm": 1.4705379009246826,
      "learning_rate": 6.19047619047619e-05,
      "loss": 0.5378,
      "step": 620
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 1.3392393589019775,
      "learning_rate": 6.0317460317460316e-05,
      "loss": 0.5751,
      "step": 640
    },
    {
      "epoch": 0.4714285714285714,
      "grad_norm": 1.2585370540618896,
      "learning_rate": 5.873015873015873e-05,
      "loss": 0.5262,
      "step": 660
    },
    {
      "epoch": 0.4857142857142857,
      "grad_norm": 1.7023590803146362,
      "learning_rate": 5.714285714285714e-05,
      "loss": 0.505,
      "step": 680
    },
    {
      "epoch": 0.5,
      "grad_norm": 1.3246266841888428,
      "learning_rate": 5.555555555555556e-05,
      "loss": 0.5401,
      "step": 700
    },
    {
      "epoch": 0.5142857142857142,
      "grad_norm": 1.4986284971237183,
      "learning_rate": 5.396825396825397e-05,
      "loss": 0.5108,
      "step": 720
    },
    {
      "epoch": 0.5285714285714286,
      "grad_norm": 1.5356686115264893,
      "learning_rate": 5.2380952380952384e-05,
      "loss": 0.5036,
      "step": 740
    },
    {
      "epoch": 0.5428571428571428,
      "grad_norm": 1.670077919960022,
      "learning_rate": 5.0793650793650794e-05,
      "loss": 0.5584,
      "step": 760
    },
    {
      "epoch": 0.5571428571428572,
      "grad_norm": 1.5580649375915527,
      "learning_rate": 4.9206349206349204e-05,
      "loss": 0.5049,
      "step": 780
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 1.558235764503479,
      "learning_rate": 4.761904761904762e-05,
      "loss": 0.5181,
      "step": 800
    },
    {
      "epoch": 0.5857142857142857,
      "grad_norm": 1.753937840461731,
      "learning_rate": 4.603174603174603e-05,
      "loss": 0.5237,
      "step": 820
    },
    {
      "epoch": 0.6,
      "grad_norm": 1.6312466859817505,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.5137,
      "step": 840
    },
    {
      "epoch": 0.6142857142857143,
      "grad_norm": 1.388678789138794,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.5176,
      "step": 860
    },
    {
      "epoch": 0.6285714285714286,
      "grad_norm": 1.627751350402832,
      "learning_rate": 4.126984126984127e-05,
      "loss": 0.4901,
      "step": 880
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 1.627482295036316,
      "learning_rate": 3.968253968253968e-05,
      "loss": 0.5166,
      "step": 900
    },
    {
      "epoch": 0.6571428571428571,
      "grad_norm": 1.5321110486984253,
      "learning_rate": 3.809523809523809e-05,
      "loss": 0.5197,
      "step": 920
    },
    {
      "epoch": 0.6714285714285714,
      "grad_norm": 1.7120718955993652,
      "learning_rate": 3.650793650793651e-05,
      "loss": 0.5043,
      "step": 940
    },
    {
      "epoch": 0.6857142857142857,
      "grad_norm": 1.5418710708618164,
      "learning_rate": 3.492063492063492e-05,
      "loss": 0.5025,
      "step": 960
    },
    {
      "epoch": 0.7,
      "grad_norm": 1.376985788345337,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.4952,
      "step": 980
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 1.3709187507629395,
      "learning_rate": 3.1746031746031745e-05,
      "loss": 0.4808,
      "step": 1000
    },
    {
      "epoch": 0.7285714285714285,
      "grad_norm": 1.600040078163147,
      "learning_rate": 3.0158730158730158e-05,
      "loss": 0.4876,
      "step": 1020
    },
    {
      "epoch": 0.7428571428571429,
      "grad_norm": 1.501184105873108,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.4659,
      "step": 1040
    },
    {
      "epoch": 0.7571428571428571,
      "grad_norm": 1.4377880096435547,
      "learning_rate": 2.6984126984126984e-05,
      "loss": 0.4625,
      "step": 1060
    },
    {
      "epoch": 0.7714285714285715,
      "grad_norm": 1.2743709087371826,
      "learning_rate": 2.5396825396825397e-05,
      "loss": 0.4736,
      "step": 1080
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 1.6590967178344727,
      "learning_rate": 2.380952380952381e-05,
      "loss": 0.4854,
      "step": 1100
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.5961614847183228,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.4593,
      "step": 1120
    },
    {
      "epoch": 0.8142857142857143,
      "grad_norm": 1.3354884386062622,
      "learning_rate": 2.0634920634920636e-05,
      "loss": 0.4753,
      "step": 1140
    },
    {
      "epoch": 0.8285714285714286,
      "grad_norm": 1.3281641006469727,
      "learning_rate": 1.9047619047619046e-05,
      "loss": 0.4708,
      "step": 1160
    },
    {
      "epoch": 0.8428571428571429,
      "grad_norm": 1.233601450920105,
      "learning_rate": 1.746031746031746e-05,
      "loss": 0.4891,
      "step": 1180
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 1.3751627206802368,
      "learning_rate": 1.5873015873015872e-05,
      "loss": 0.4936,
      "step": 1200
    },
    {
      "epoch": 0.8714285714285714,
      "grad_norm": 1.4707367420196533,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.4635,
      "step": 1220
    },
    {
      "epoch": 0.8857142857142857,
      "grad_norm": 1.5429284572601318,
      "learning_rate": 1.2698412698412699e-05,
      "loss": 0.4773,
      "step": 1240
    },
    {
      "epoch": 0.9,
      "grad_norm": 1.4306563138961792,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.4517,
      "step": 1260
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 1.6000938415527344,
      "learning_rate": 9.523809523809523e-06,
      "loss": 0.453,
      "step": 1280
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 1.5970221757888794,
      "learning_rate": 7.936507936507936e-06,
      "loss": 0.4729,
      "step": 1300
    },
    {
      "epoch": 0.9428571428571428,
      "grad_norm": 1.4147007465362549,
      "learning_rate": 6.349206349206349e-06,
      "loss": 0.4797,
      "step": 1320
    },
    {
      "epoch": 0.9571428571428572,
      "grad_norm": 1.682041049003601,
      "learning_rate": 4.7619047619047615e-06,
      "loss": 0.4969,
      "step": 1340
    },
    {
      "epoch": 0.9714285714285714,
      "grad_norm": 1.436335563659668,
      "learning_rate": 3.1746031746031746e-06,
      "loss": 0.4906,
      "step": 1360
    },
    {
      "epoch": 0.9857142857142858,
      "grad_norm": 1.7256879806518555,
      "learning_rate": 1.5873015873015873e-06,
      "loss": 0.458,
      "step": 1380
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.6402218341827393,
      "learning_rate": 0.0,
      "loss": 0.4668,
      "step": 1400
    }
  ],
  "logging_steps": 20,
  "max_steps": 1400,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8419093040332800.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}