{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 24.868123587038433, |
|
"eval_steps": 500, |
|
"global_step": 4125, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.06028636021100226, |
|
"grad_norm": 14.353901908542692, |
|
"learning_rate": 5e-06, |
|
"loss": 1.0595, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.12057272042200452, |
|
"grad_norm": 1.3180865308999647, |
|
"learning_rate": 5e-06, |
|
"loss": 0.9403, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.1808590806330068, |
|
"grad_norm": 0.9782137323160548, |
|
"learning_rate": 5e-06, |
|
"loss": 0.89, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.24114544084400905, |
|
"grad_norm": 1.3518217541449848, |
|
"learning_rate": 5e-06, |
|
"loss": 0.8618, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.30143180105501133, |
|
"grad_norm": 0.8934828143592951, |
|
"learning_rate": 5e-06, |
|
"loss": 0.8447, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.3617181612660136, |
|
"grad_norm": 0.9919016971761021, |
|
"learning_rate": 5e-06, |
|
"loss": 0.8318, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.42200452147701584, |
|
"grad_norm": 0.9227497685306041, |
|
"learning_rate": 5e-06, |
|
"loss": 0.8177, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.4822908816880181, |
|
"grad_norm": 0.9363087183332998, |
|
"learning_rate": 5e-06, |
|
"loss": 0.8106, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.5425772418990203, |
|
"grad_norm": 1.0736624468965914, |
|
"learning_rate": 5e-06, |
|
"loss": 0.8052, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.6028636021100227, |
|
"grad_norm": 1.353564519927803, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7907, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.6631499623210249, |
|
"grad_norm": 1.0455405570196998, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7913, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.7234363225320272, |
|
"grad_norm": 0.9569192147667731, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7828, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.7837226827430294, |
|
"grad_norm": 0.8298672277424939, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7841, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.8440090429540317, |
|
"grad_norm": 1.2437896161299962, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7832, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.9042954031650339, |
|
"grad_norm": 0.6290099294011746, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7818, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.9645817633760362, |
|
"grad_norm": 0.683050547264518, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7807, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.9947249434815373, |
|
"eval_loss": 0.7690147161483765, |
|
"eval_runtime": 114.9859, |
|
"eval_samples_per_second": 38.866, |
|
"eval_steps_per_second": 0.609, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 1.0248681235870385, |
|
"grad_norm": 0.8375316338814506, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7706, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.0851544837980407, |
|
"grad_norm": 0.8491856111019711, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7207, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.145440844009043, |
|
"grad_norm": 1.00264106211233, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7264, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.2057272042200453, |
|
"grad_norm": 0.9524591725477187, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7223, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.2660135644310475, |
|
"grad_norm": 0.8200876998100735, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7237, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.3262999246420497, |
|
"grad_norm": 0.6972921329207209, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7233, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 1.3865862848530521, |
|
"grad_norm": 1.3472894348419586, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7175, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 1.4468726450640543, |
|
"grad_norm": 0.8080326013334395, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7203, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 1.5071590052750565, |
|
"grad_norm": 0.6690842132293575, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7184, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 1.5674453654860587, |
|
"grad_norm": 0.6680726643782731, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7193, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 1.627731725697061, |
|
"grad_norm": 0.6582407897638277, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7171, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 1.6880180859080633, |
|
"grad_norm": 0.6058749327234759, |
|
"learning_rate": 5e-06, |
|
"loss": 0.718, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 1.7483044461190655, |
|
"grad_norm": 0.6771950284954183, |
|
"learning_rate": 5e-06, |
|
"loss": 0.717, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 1.8085908063300677, |
|
"grad_norm": 0.620278984859575, |
|
"learning_rate": 5e-06, |
|
"loss": 0.721, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 1.8688771665410702, |
|
"grad_norm": 0.7376417188133093, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7187, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 1.9291635267520724, |
|
"grad_norm": 0.841815742734191, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7168, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 1.9894498869630746, |
|
"grad_norm": 0.9694154154724743, |
|
"learning_rate": 5e-06, |
|
"loss": 0.7139, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 1.995478522984175, |
|
"eval_loss": 0.7520068287849426, |
|
"eval_runtime": 112.311, |
|
"eval_samples_per_second": 39.791, |
|
"eval_steps_per_second": 0.623, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 2.049736247174077, |
|
"grad_norm": 1.3857092066503702, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6895, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 2.110022607385079, |
|
"grad_norm": 0.9308939279408932, |
|
"learning_rate": 5e-06, |
|
"loss": 0.661, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 2.1703089675960814, |
|
"grad_norm": 1.7049059921843441, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6596, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 2.230595327807084, |
|
"grad_norm": 1.1664692858604568, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6626, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 2.290881688018086, |
|
"grad_norm": 0.8099811209522968, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6609, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 2.351168048229088, |
|
"grad_norm": 0.7769380652338045, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6633, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 2.4114544084400906, |
|
"grad_norm": 1.0202300923789958, |
|
"learning_rate": 5e-06, |
|
"loss": 0.667, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 2.4717407686510926, |
|
"grad_norm": 0.9120675026610565, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6654, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 2.532027128862095, |
|
"grad_norm": 0.8057600298686861, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6643, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 2.5923134890730974, |
|
"grad_norm": 0.6688016636812687, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6657, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 2.6525998492840994, |
|
"grad_norm": 0.7419224625762498, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6669, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 2.712886209495102, |
|
"grad_norm": 0.7294710798953756, |
|
"learning_rate": 5e-06, |
|
"loss": 0.664, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 2.7731725697061043, |
|
"grad_norm": 0.7239503892331308, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6628, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 2.8334589299171062, |
|
"grad_norm": 0.6873637028133951, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6634, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 2.8937452901281087, |
|
"grad_norm": 0.7070671521244221, |
|
"learning_rate": 5e-06, |
|
"loss": 0.67, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 2.9540316503391106, |
|
"grad_norm": 0.6467492636416056, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6642, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 2.9962321024868124, |
|
"eval_loss": 0.7524702548980713, |
|
"eval_runtime": 115.8914, |
|
"eval_samples_per_second": 38.562, |
|
"eval_steps_per_second": 0.604, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 3.014318010550113, |
|
"grad_norm": 1.2600144566497484, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6595, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 3.0746043707611155, |
|
"grad_norm": 0.9439871932745402, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6121, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 3.1348907309721175, |
|
"grad_norm": 0.9269396364377557, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6091, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 3.19517709118312, |
|
"grad_norm": 1.4073687560739623, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6146, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 3.255463451394122, |
|
"grad_norm": 1.0909582280984875, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6135, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 3.3157498116051243, |
|
"grad_norm": 1.6855484277386785, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6181, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 3.3760361718161267, |
|
"grad_norm": 1.3960879287332164, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6204, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 3.4363225320271287, |
|
"grad_norm": 1.3111215484703225, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6166, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 3.496608892238131, |
|
"grad_norm": 1.4456626678129865, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6187, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 3.5568952524491335, |
|
"grad_norm": 0.9267972418344134, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6198, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 3.6171816126601355, |
|
"grad_norm": 1.5516401845259948, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6216, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 3.677467972871138, |
|
"grad_norm": 1.5014637761221297, |
|
"learning_rate": 5e-06, |
|
"loss": 0.618, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 3.7377543330821403, |
|
"grad_norm": 1.1059687872166368, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6185, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 3.7980406932931423, |
|
"grad_norm": 1.2950526783062104, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6207, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 3.8583270535041447, |
|
"grad_norm": 0.9542402867171873, |
|
"learning_rate": 5e-06, |
|
"loss": 0.618, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 3.918613413715147, |
|
"grad_norm": 0.7924967732032556, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6163, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 3.978899773926149, |
|
"grad_norm": 0.9450387587222794, |
|
"learning_rate": 5e-06, |
|
"loss": 0.6186, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 3.99698568198945, |
|
"eval_loss": 0.7615257501602173, |
|
"eval_runtime": 111.9191, |
|
"eval_samples_per_second": 39.931, |
|
"eval_steps_per_second": 0.625, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 4.039186134137151, |
|
"grad_norm": 0.9971689199182937, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5958, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 4.099472494348154, |
|
"grad_norm": 0.7965799505534403, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5627, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 4.159758854559156, |
|
"grad_norm": 0.8634698321641859, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5642, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 4.220045214770158, |
|
"grad_norm": 0.8538005391887241, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5721, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 4.280331574981161, |
|
"grad_norm": 0.8087517000342034, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5702, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 4.340617935192163, |
|
"grad_norm": 0.7416451232136964, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5735, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 4.400904295403165, |
|
"grad_norm": 0.683331985209307, |
|
"learning_rate": 5e-06, |
|
"loss": 0.576, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 4.461190655614168, |
|
"grad_norm": 0.7531824862918711, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5747, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 4.52147701582517, |
|
"grad_norm": 0.6679489381777426, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5788, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 4.581763376036172, |
|
"grad_norm": 0.8184328898914945, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5773, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 4.642049736247174, |
|
"grad_norm": 0.7709343247764259, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5747, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 4.702336096458176, |
|
"grad_norm": 0.8008305213299817, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5789, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 4.762622456669178, |
|
"grad_norm": 0.770151242560135, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5808, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 4.822908816880181, |
|
"grad_norm": 0.8007315199653617, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5766, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 4.883195177091183, |
|
"grad_norm": 0.8153924743627864, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5794, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 4.943481537302185, |
|
"grad_norm": 0.7155989123366884, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5777, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 4.997739261492088, |
|
"eval_loss": 0.7784610390663147, |
|
"eval_runtime": 113.9486, |
|
"eval_samples_per_second": 39.219, |
|
"eval_steps_per_second": 0.614, |
|
"step": 829 |
|
}, |
|
{ |
|
"epoch": 5.003767897513188, |
|
"grad_norm": 1.5968784870292558, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5864, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 5.06405425772419, |
|
"grad_norm": 1.3160101230444552, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5136, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 5.124340617935192, |
|
"grad_norm": 0.9218240884563489, |
|
"learning_rate": 5e-06, |
|
"loss": 0.514, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 5.184626978146194, |
|
"grad_norm": 0.943292128668918, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5129, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 5.244913338357197, |
|
"grad_norm": 0.9210116431359558, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5147, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 5.305199698568199, |
|
"grad_norm": 0.9123827846726384, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5152, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 5.365486058779201, |
|
"grad_norm": 0.8967759483580899, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5246, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 5.425772418990204, |
|
"grad_norm": 1.0833892045281441, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5173, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 5.486058779201206, |
|
"grad_norm": 0.8452587501323132, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5205, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 5.546345139412208, |
|
"grad_norm": 0.9036749110113976, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5213, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 5.6066314996232105, |
|
"grad_norm": 0.8270647016134544, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5214, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 5.6669178598342125, |
|
"grad_norm": 0.760029827349666, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5233, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 5.7272042200452145, |
|
"grad_norm": 1.1680570931837515, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5284, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 5.787490580256217, |
|
"grad_norm": 1.0453134822125045, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5273, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 5.847776940467219, |
|
"grad_norm": 0.8063546838482688, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5265, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 5.908063300678221, |
|
"grad_norm": 0.8921967199529918, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5316, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 5.968349660889224, |
|
"grad_norm": 0.8942899369042694, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5287, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 5.998492840994725, |
|
"eval_loss": 0.815434992313385, |
|
"eval_runtime": 116.1895, |
|
"eval_samples_per_second": 38.463, |
|
"eval_steps_per_second": 0.602, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 6.028636021100226, |
|
"grad_norm": 1.6908391333792119, |
|
"learning_rate": 5e-06, |
|
"loss": 0.5023, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 6.088922381311228, |
|
"grad_norm": 1.2916624414612379, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4578, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 6.149208741522231, |
|
"grad_norm": 1.1040568786409888, |
|
"learning_rate": 5e-06, |
|
"loss": 0.454, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 6.209495101733233, |
|
"grad_norm": 1.0463754179790778, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4592, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 6.269781461944235, |
|
"grad_norm": 1.0493678707628407, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4582, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 6.330067822155238, |
|
"grad_norm": 1.105648007217362, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4593, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 6.39035418236624, |
|
"grad_norm": 1.0291790248523935, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4629, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 6.450640542577242, |
|
"grad_norm": 1.2514460402035084, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4634, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 6.510926902788244, |
|
"grad_norm": 0.8518373738036685, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4654, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 6.571213262999247, |
|
"grad_norm": 1.071998502866085, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4663, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 6.6314996232102486, |
|
"grad_norm": 0.9000534182028347, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4658, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 6.691785983421251, |
|
"grad_norm": 0.8995635602376101, |
|
"learning_rate": 5e-06, |
|
"loss": 0.469, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 6.752072343632253, |
|
"grad_norm": 0.9011803423576781, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4715, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 6.812358703843255, |
|
"grad_norm": 0.9140285189147836, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4763, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 6.872645064054257, |
|
"grad_norm": 1.0476263187239303, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4781, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 6.93293142426526, |
|
"grad_norm": 1.1369535585860062, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4702, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 6.993217784476262, |
|
"grad_norm": 1.2720345806831712, |
|
"learning_rate": 5e-06, |
|
"loss": 0.473, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 6.999246420497363, |
|
"eval_loss": 0.8709951639175415, |
|
"eval_runtime": 116.2672, |
|
"eval_samples_per_second": 38.437, |
|
"eval_steps_per_second": 0.602, |
|
"step": 1161 |
|
}, |
|
{ |
|
"epoch": 7.053504144687264, |
|
"grad_norm": 1.6175687599813067, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4154, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 7.113790504898267, |
|
"grad_norm": 1.2621822187556238, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3985, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 7.174076865109269, |
|
"grad_norm": 1.3109771889362003, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3937, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 7.234363225320271, |
|
"grad_norm": 1.1476426236895934, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3986, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 7.294649585531274, |
|
"grad_norm": 1.1513686323462657, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4003, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 7.354935945742276, |
|
"grad_norm": 1.2318284334824612, |
|
"learning_rate": 5e-06, |
|
"loss": 0.402, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 7.415222305953278, |
|
"grad_norm": 1.3293034219686575, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4041, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 7.475508666164281, |
|
"grad_norm": 1.3126587055332364, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4028, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 7.535795026375283, |
|
"grad_norm": 1.4541660160673635, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4046, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 7.596081386586285, |
|
"grad_norm": 1.1677113248358406, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4064, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 7.6563677467972875, |
|
"grad_norm": 1.2339252526811149, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4057, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 7.7166541070082895, |
|
"grad_norm": 0.9795667261721808, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4105, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 7.776940467219291, |
|
"grad_norm": 1.3122595751880688, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4115, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 7.837226827430294, |
|
"grad_norm": 1.3363743108253807, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4094, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 7.897513187641296, |
|
"grad_norm": 1.2278606177387434, |
|
"learning_rate": 5e-06, |
|
"loss": 0.411, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 7.957799547852298, |
|
"grad_norm": 1.099485555308857, |
|
"learning_rate": 5e-06, |
|
"loss": 0.4134, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"eval_loss": 0.9475046396255493, |
|
"eval_runtime": 118.217, |
|
"eval_samples_per_second": 37.803, |
|
"eval_steps_per_second": 0.592, |
|
"step": 1327 |
|
}, |
|
{ |
|
"epoch": 8.0180859080633, |
|
"grad_norm": 2.747551612605114, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3977, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 8.078372268274302, |
|
"grad_norm": 1.7260940558261295, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3441, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 8.138658628485306, |
|
"grad_norm": 1.2842675938679606, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3397, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 8.198944988696308, |
|
"grad_norm": 1.1398592198842168, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3442, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 8.25923134890731, |
|
"grad_norm": 1.153557070127385, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3454, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 8.319517709118312, |
|
"grad_norm": 1.3384373413827295, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3438, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 8.379804069329314, |
|
"grad_norm": 1.3554470516864743, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3493, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 8.440090429540316, |
|
"grad_norm": 1.325212009488347, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3528, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 8.500376789751318, |
|
"grad_norm": 1.74556781346271, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3548, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 8.560663149962322, |
|
"grad_norm": 1.3741632805919384, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3518, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 8.620949510173324, |
|
"grad_norm": 1.3019952245252948, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3521, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 8.681235870384326, |
|
"grad_norm": 1.100365748448614, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3573, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 8.741522230595328, |
|
"grad_norm": 1.3911327396968252, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3574, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 8.80180859080633, |
|
"grad_norm": 1.3853784407579808, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3606, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 8.862094951017333, |
|
"grad_norm": 1.278532404930149, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3567, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 8.922381311228335, |
|
"grad_norm": 1.207780040603517, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3565, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 8.982667671439337, |
|
"grad_norm": 1.262306381996002, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3615, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 8.994724943481538, |
|
"eval_loss": 1.0203213691711426, |
|
"eval_runtime": 114.5597, |
|
"eval_samples_per_second": 39.01, |
|
"eval_steps_per_second": 0.611, |
|
"step": 1492 |
|
}, |
|
{ |
|
"epoch": 9.04295403165034, |
|
"grad_norm": 1.9088952161459727, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3188, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 9.103240391861341, |
|
"grad_norm": 1.5762821812746035, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2921, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 9.163526752072343, |
|
"grad_norm": 1.892762439817433, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2866, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 9.223813112283345, |
|
"grad_norm": 1.5993468972103952, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2913, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 9.284099472494349, |
|
"grad_norm": 1.7180835806783503, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2925, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 9.34438583270535, |
|
"grad_norm": 1.4283006573501846, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2936, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 9.404672192916353, |
|
"grad_norm": 1.472590439034633, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2957, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 9.464958553127355, |
|
"grad_norm": 1.6627373496351208, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2968, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 9.525244913338357, |
|
"grad_norm": 1.6479857681742245, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2968, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 9.585531273549359, |
|
"grad_norm": 1.451039229532527, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2969, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 9.645817633760362, |
|
"grad_norm": 1.338137019241527, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2999, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 9.706103993971364, |
|
"grad_norm": 1.4364529449975805, |
|
"learning_rate": 5e-06, |
|
"loss": 0.301, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 9.766390354182366, |
|
"grad_norm": 1.6181472883899526, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3021, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 9.826676714393368, |
|
"grad_norm": 1.2929981294290136, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3047, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 9.88696307460437, |
|
"grad_norm": 1.5569082204346147, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3039, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 9.947249434815372, |
|
"grad_norm": 1.2611067636631264, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3057, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 9.995478522984175, |
|
"eval_loss": 1.1177444458007812, |
|
"eval_runtime": 116.3468, |
|
"eval_samples_per_second": 38.411, |
|
"eval_steps_per_second": 0.602, |
|
"step": 1658 |
|
}, |
|
{ |
|
"epoch": 10.007535795026376, |
|
"grad_norm": 1.793363117063337, |
|
"learning_rate": 5e-06, |
|
"loss": 0.3015, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 10.067822155237378, |
|
"grad_norm": 1.9015543562249504, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2446, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 10.12810851544838, |
|
"grad_norm": 1.6166742540525376, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2417, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 10.188394875659382, |
|
"grad_norm": 1.5826959730874415, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2407, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 10.248681235870384, |
|
"grad_norm": 1.313614154478162, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2421, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 10.308967596081386, |
|
"grad_norm": 1.8551338463863283, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2432, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 10.369253956292388, |
|
"grad_norm": 1.3937043583045259, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2426, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 10.429540316503392, |
|
"grad_norm": 1.4908947612613312, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2458, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 10.489826676714394, |
|
"grad_norm": 1.3513472181146375, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2468, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 10.550113036925396, |
|
"grad_norm": 1.2442859437459592, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2472, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 10.610399397136398, |
|
"grad_norm": 1.61049205004398, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2497, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 10.6706857573474, |
|
"grad_norm": 1.4687600663257643, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2513, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 10.730972117558402, |
|
"grad_norm": 1.3544154919254643, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2534, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 10.791258477769405, |
|
"grad_norm": 1.4310611416705883, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2534, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 10.851544837980407, |
|
"grad_norm": 1.5722579222869695, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2543, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 10.91183119819141, |
|
"grad_norm": 1.520704900113862, |
|
"learning_rate": 5e-06, |
|
"loss": 0.254, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 10.972117558402411, |
|
"grad_norm": 1.4393868378454422, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2565, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 10.996232102486813, |
|
"eval_loss": 1.236780047416687, |
|
"eval_runtime": 115.9111, |
|
"eval_samples_per_second": 38.555, |
|
"eval_steps_per_second": 0.604, |
|
"step": 1824 |
|
}, |
|
{ |
|
"epoch": 11.032403918613413, |
|
"grad_norm": 2.415959560977028, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2285, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 11.092690278824415, |
|
"grad_norm": 1.5442333094800837, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1975, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 11.152976639035419, |
|
"grad_norm": 2.1161682250412257, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1968, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 11.213262999246421, |
|
"grad_norm": 1.6062184104077026, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1967, |
|
"step": 1860 |
|
}, |
|
{ |
|
"epoch": 11.273549359457423, |
|
"grad_norm": 1.6876217090206025, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1937, |
|
"step": 1870 |
|
}, |
|
{ |
|
"epoch": 11.333835719668425, |
|
"grad_norm": 1.756341727895367, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1966, |
|
"step": 1880 |
|
}, |
|
{ |
|
"epoch": 11.394122079879427, |
|
"grad_norm": 1.5508699967732422, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1997, |
|
"step": 1890 |
|
}, |
|
{ |
|
"epoch": 11.454408440090429, |
|
"grad_norm": 1.5669766694757912, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2023, |
|
"step": 1900 |
|
}, |
|
{ |
|
"epoch": 11.51469480030143, |
|
"grad_norm": 1.4502558429874435, |
|
"learning_rate": 5e-06, |
|
"loss": 0.202, |
|
"step": 1910 |
|
}, |
|
{ |
|
"epoch": 11.574981160512435, |
|
"grad_norm": 1.432868245038754, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2027, |
|
"step": 1920 |
|
}, |
|
{ |
|
"epoch": 11.635267520723437, |
|
"grad_norm": 1.944951997696116, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2051, |
|
"step": 1930 |
|
}, |
|
{ |
|
"epoch": 11.695553880934439, |
|
"grad_norm": 1.5628919140219797, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2047, |
|
"step": 1940 |
|
}, |
|
{ |
|
"epoch": 11.75584024114544, |
|
"grad_norm": 1.8554418586433572, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2036, |
|
"step": 1950 |
|
}, |
|
{ |
|
"epoch": 11.816126601356443, |
|
"grad_norm": 1.5083234117594833, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2057, |
|
"step": 1960 |
|
}, |
|
{ |
|
"epoch": 11.876412961567446, |
|
"grad_norm": 1.6850324704140989, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2084, |
|
"step": 1970 |
|
}, |
|
{ |
|
"epoch": 11.936699321778448, |
|
"grad_norm": 1.518863675506513, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2081, |
|
"step": 1980 |
|
}, |
|
{ |
|
"epoch": 11.99698568198945, |
|
"grad_norm": 2.025449919349873, |
|
"learning_rate": 5e-06, |
|
"loss": 0.2099, |
|
"step": 1990 |
|
}, |
|
{ |
|
"epoch": 11.99698568198945, |
|
"eval_loss": 1.3552354574203491, |
|
"eval_runtime": 116.1042, |
|
"eval_samples_per_second": 38.491, |
|
"eval_steps_per_second": 0.603, |
|
"step": 1990 |
|
}, |
|
{ |
|
"epoch": 12.057272042200452, |
|
"grad_norm": 1.9707275891443514, |
|
"learning_rate": 5e-06, |
|
"loss": 0.161, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 12.117558402411454, |
|
"grad_norm": 1.60811233115537, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1527, |
|
"step": 2010 |
|
}, |
|
{ |
|
"epoch": 12.177844762622456, |
|
"grad_norm": 1.6751301942391925, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1541, |
|
"step": 2020 |
|
}, |
|
{ |
|
"epoch": 12.238131122833458, |
|
"grad_norm": 1.4492067853763235, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1559, |
|
"step": 2030 |
|
}, |
|
{ |
|
"epoch": 12.298417483044462, |
|
"grad_norm": 1.9142452633048677, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1576, |
|
"step": 2040 |
|
}, |
|
{ |
|
"epoch": 12.358703843255464, |
|
"grad_norm": 1.4385285667791092, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1589, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 12.418990203466466, |
|
"grad_norm": 1.4928747098321213, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1608, |
|
"step": 2060 |
|
}, |
|
{ |
|
"epoch": 12.479276563677468, |
|
"grad_norm": 1.5019521655912937, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1605, |
|
"step": 2070 |
|
}, |
|
{ |
|
"epoch": 12.53956292388847, |
|
"grad_norm": 1.874845955363987, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1612, |
|
"step": 2080 |
|
}, |
|
{ |
|
"epoch": 12.599849284099472, |
|
"grad_norm": 1.885802643703012, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1614, |
|
"step": 2090 |
|
}, |
|
{ |
|
"epoch": 12.660135644310476, |
|
"grad_norm": 1.515486697883317, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1622, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 12.720422004521478, |
|
"grad_norm": 1.7537896738178245, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1644, |
|
"step": 2110 |
|
}, |
|
{ |
|
"epoch": 12.78070836473248, |
|
"grad_norm": 1.6471681441773147, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1647, |
|
"step": 2120 |
|
}, |
|
{ |
|
"epoch": 12.840994724943481, |
|
"grad_norm": 1.590934554816475, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1648, |
|
"step": 2130 |
|
}, |
|
{ |
|
"epoch": 12.901281085154483, |
|
"grad_norm": 1.7730249951617123, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1656, |
|
"step": 2140 |
|
}, |
|
{ |
|
"epoch": 12.961567445365485, |
|
"grad_norm": 1.6645957171669876, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1676, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 12.997739261492088, |
|
"eval_loss": 1.5070669651031494, |
|
"eval_runtime": 116.1666, |
|
"eval_samples_per_second": 38.471, |
|
"eval_steps_per_second": 0.603, |
|
"step": 2156 |
|
}, |
|
{ |
|
"epoch": 13.02185380557649, |
|
"grad_norm": 2.462408072338029, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1517, |
|
"step": 2160 |
|
}, |
|
{ |
|
"epoch": 13.082140165787491, |
|
"grad_norm": 2.4318360950844804, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1195, |
|
"step": 2170 |
|
}, |
|
{ |
|
"epoch": 13.142426525998493, |
|
"grad_norm": 1.7426593578459117, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1201, |
|
"step": 2180 |
|
}, |
|
{ |
|
"epoch": 13.202712886209495, |
|
"grad_norm": 2.2383839547509696, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1191, |
|
"step": 2190 |
|
}, |
|
{ |
|
"epoch": 13.262999246420497, |
|
"grad_norm": 1.5553317913023317, |
|
"learning_rate": 5e-06, |
|
"loss": 0.118, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 13.323285606631499, |
|
"grad_norm": 1.4669913420252614, |
|
"learning_rate": 5e-06, |
|
"loss": 0.119, |
|
"step": 2210 |
|
}, |
|
{ |
|
"epoch": 13.383571966842501, |
|
"grad_norm": 1.8080283494554128, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1196, |
|
"step": 2220 |
|
}, |
|
{ |
|
"epoch": 13.443858327053505, |
|
"grad_norm": 1.646647327329277, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1233, |
|
"step": 2230 |
|
}, |
|
{ |
|
"epoch": 13.504144687264507, |
|
"grad_norm": 1.8533611593149397, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1241, |
|
"step": 2240 |
|
}, |
|
{ |
|
"epoch": 13.564431047475509, |
|
"grad_norm": 1.6777607818909321, |
|
"learning_rate": 5e-06, |
|
"loss": 0.126, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 13.62471740768651, |
|
"grad_norm": 1.7745416454251923, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1266, |
|
"step": 2260 |
|
}, |
|
{ |
|
"epoch": 13.685003767897513, |
|
"grad_norm": 1.7096006593564836, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1282, |
|
"step": 2270 |
|
}, |
|
{ |
|
"epoch": 13.745290128108515, |
|
"grad_norm": 1.8638678064551115, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1267, |
|
"step": 2280 |
|
}, |
|
{ |
|
"epoch": 13.805576488319518, |
|
"grad_norm": 1.4262510400727582, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1273, |
|
"step": 2290 |
|
}, |
|
{ |
|
"epoch": 13.86586284853052, |
|
"grad_norm": 1.8828592364046501, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1269, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 13.926149208741522, |
|
"grad_norm": 1.6583448018945106, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1273, |
|
"step": 2310 |
|
}, |
|
{ |
|
"epoch": 13.986435568952524, |
|
"grad_norm": 1.548491818827999, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1283, |
|
"step": 2320 |
|
}, |
|
{ |
|
"epoch": 13.998492840994725, |
|
"eval_loss": 1.6323552131652832, |
|
"eval_runtime": 115.7608, |
|
"eval_samples_per_second": 38.605, |
|
"eval_steps_per_second": 0.605, |
|
"step": 2322 |
|
}, |
|
{ |
|
"epoch": 14.046721929163526, |
|
"grad_norm": 2.0937924453069203, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1006, |
|
"step": 2330 |
|
}, |
|
{ |
|
"epoch": 14.107008289374528, |
|
"grad_norm": 2.139024110314688, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0897, |
|
"step": 2340 |
|
}, |
|
{ |
|
"epoch": 14.167294649585532, |
|
"grad_norm": 1.5948191216888472, |
|
"learning_rate": 5e-06, |
|
"loss": 0.092, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 14.227581009796534, |
|
"grad_norm": 1.6158036371008326, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0916, |
|
"step": 2360 |
|
}, |
|
{ |
|
"epoch": 14.287867370007536, |
|
"grad_norm": 1.6380184320632138, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0946, |
|
"step": 2370 |
|
}, |
|
{ |
|
"epoch": 14.348153730218538, |
|
"grad_norm": 1.5125683894811608, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0926, |
|
"step": 2380 |
|
}, |
|
{ |
|
"epoch": 14.40844009042954, |
|
"grad_norm": 1.8291017955611948, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0967, |
|
"step": 2390 |
|
}, |
|
{ |
|
"epoch": 14.468726450640542, |
|
"grad_norm": 1.6743144432735446, |
|
"learning_rate": 5e-06, |
|
"loss": 0.097, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 14.529012810851544, |
|
"grad_norm": 1.7276135456842643, |
|
"learning_rate": 5e-06, |
|
"loss": 0.096, |
|
"step": 2410 |
|
}, |
|
{ |
|
"epoch": 14.589299171062548, |
|
"grad_norm": 1.734533902590376, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0977, |
|
"step": 2420 |
|
}, |
|
{ |
|
"epoch": 14.64958553127355, |
|
"grad_norm": 1.7106037137785368, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0974, |
|
"step": 2430 |
|
}, |
|
{ |
|
"epoch": 14.709871891484552, |
|
"grad_norm": 1.848167736152076, |
|
"learning_rate": 5e-06, |
|
"loss": 0.098, |
|
"step": 2440 |
|
}, |
|
{ |
|
"epoch": 14.770158251695554, |
|
"grad_norm": 2.0729946870310907, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0988, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 14.830444611906556, |
|
"grad_norm": 1.6522773246662863, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0996, |
|
"step": 2460 |
|
}, |
|
{ |
|
"epoch": 14.890730972117558, |
|
"grad_norm": 1.8165186575808512, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1018, |
|
"step": 2470 |
|
}, |
|
{ |
|
"epoch": 14.951017332328561, |
|
"grad_norm": 1.8324940323277443, |
|
"learning_rate": 5e-06, |
|
"loss": 0.1022, |
|
"step": 2480 |
|
}, |
|
{ |
|
"epoch": 14.999246420497363, |
|
"eval_loss": 1.7541841268539429, |
|
"eval_runtime": 116.4567, |
|
"eval_samples_per_second": 38.375, |
|
"eval_steps_per_second": 0.601, |
|
"step": 2488 |
|
}, |
|
{ |
|
"epoch": 15.011303692539563, |
|
"grad_norm": 1.7122346572238332, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0969, |
|
"step": 2490 |
|
}, |
|
{ |
|
"epoch": 15.071590052750565, |
|
"grad_norm": 1.7869038836513413, |
|
"learning_rate": 5e-06, |
|
"loss": 0.07, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 15.131876412961567, |
|
"grad_norm": 1.5692589038411329, |
|
"learning_rate": 5e-06, |
|
"loss": 0.069, |
|
"step": 2510 |
|
}, |
|
{ |
|
"epoch": 15.19216277317257, |
|
"grad_norm": 1.5568543176798626, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0714, |
|
"step": 2520 |
|
}, |
|
{ |
|
"epoch": 15.252449133383571, |
|
"grad_norm": 1.8979966448489778, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0721, |
|
"step": 2530 |
|
}, |
|
{ |
|
"epoch": 15.312735493594575, |
|
"grad_norm": 1.6079155193057422, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0734, |
|
"step": 2540 |
|
}, |
|
{ |
|
"epoch": 15.373021853805577, |
|
"grad_norm": 1.5682954069117738, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0731, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 15.433308214016579, |
|
"grad_norm": 1.607771288791899, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0736, |
|
"step": 2560 |
|
}, |
|
{ |
|
"epoch": 15.493594574227581, |
|
"grad_norm": 1.592532966261963, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0751, |
|
"step": 2570 |
|
}, |
|
{ |
|
"epoch": 15.553880934438583, |
|
"grad_norm": 2.106042369619277, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0765, |
|
"step": 2580 |
|
}, |
|
{ |
|
"epoch": 15.614167294649585, |
|
"grad_norm": 1.9643387382353286, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0764, |
|
"step": 2590 |
|
}, |
|
{ |
|
"epoch": 15.674453654860589, |
|
"grad_norm": 2.356472326588705, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0766, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 15.73474001507159, |
|
"grad_norm": 1.7285204414699735, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0768, |
|
"step": 2610 |
|
}, |
|
{ |
|
"epoch": 15.795026375282593, |
|
"grad_norm": 1.656141946179993, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0781, |
|
"step": 2620 |
|
}, |
|
{ |
|
"epoch": 15.855312735493595, |
|
"grad_norm": 1.751656627220462, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0778, |
|
"step": 2630 |
|
}, |
|
{ |
|
"epoch": 15.915599095704597, |
|
"grad_norm": 1.9120339283286005, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0777, |
|
"step": 2640 |
|
}, |
|
{ |
|
"epoch": 15.975885455915598, |
|
"grad_norm": 1.7091309765672063, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0779, |
|
"step": 2650 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"eval_loss": 1.8729431629180908, |
|
"eval_runtime": 130.3121, |
|
"eval_samples_per_second": 34.295, |
|
"eval_steps_per_second": 0.537, |
|
"step": 2654 |
|
}, |
|
{ |
|
"epoch": 16.0361718161266, |
|
"grad_norm": 1.6873009916146906, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0634, |
|
"step": 2660 |
|
}, |
|
{ |
|
"epoch": 16.096458176337602, |
|
"grad_norm": 1.4499459070972593, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0517, |
|
"step": 2670 |
|
}, |
|
{ |
|
"epoch": 16.156744536548604, |
|
"grad_norm": 1.518251208097316, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0532, |
|
"step": 2680 |
|
}, |
|
{ |
|
"epoch": 16.217030896759606, |
|
"grad_norm": 1.7901282259982598, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0547, |
|
"step": 2690 |
|
}, |
|
{ |
|
"epoch": 16.277317256970612, |
|
"grad_norm": 1.6296841948609884, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0553, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 16.337603617181614, |
|
"grad_norm": 1.9077348260850149, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0551, |
|
"step": 2710 |
|
}, |
|
{ |
|
"epoch": 16.397889977392616, |
|
"grad_norm": 1.6275792847492503, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0556, |
|
"step": 2720 |
|
}, |
|
{ |
|
"epoch": 16.458176337603618, |
|
"grad_norm": 1.8060616409571004, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0569, |
|
"step": 2730 |
|
}, |
|
{ |
|
"epoch": 16.51846269781462, |
|
"grad_norm": 1.6503072239622354, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0568, |
|
"step": 2740 |
|
}, |
|
{ |
|
"epoch": 16.578749058025622, |
|
"grad_norm": 1.5506661680891511, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0572, |
|
"step": 2750 |
|
}, |
|
{ |
|
"epoch": 16.639035418236624, |
|
"grad_norm": 1.6807035773937569, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0583, |
|
"step": 2760 |
|
}, |
|
{ |
|
"epoch": 16.699321778447626, |
|
"grad_norm": 1.794347099575113, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0589, |
|
"step": 2770 |
|
}, |
|
{ |
|
"epoch": 16.759608138658628, |
|
"grad_norm": 1.6112070974863837, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0591, |
|
"step": 2780 |
|
}, |
|
{ |
|
"epoch": 16.81989449886963, |
|
"grad_norm": 1.5309535448790481, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0595, |
|
"step": 2790 |
|
}, |
|
{ |
|
"epoch": 16.88018085908063, |
|
"grad_norm": 1.6962829999060645, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0595, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 16.940467219291634, |
|
"grad_norm": 1.5160770218318527, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0607, |
|
"step": 2810 |
|
}, |
|
{ |
|
"epoch": 16.994724943481536, |
|
"eval_loss": 1.9862152338027954, |
|
"eval_runtime": 135.1067, |
|
"eval_samples_per_second": 33.078, |
|
"eval_steps_per_second": 0.518, |
|
"step": 2819 |
|
}, |
|
{ |
|
"epoch": 17.00075357950264, |
|
"grad_norm": 2.340332137108714, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0615, |
|
"step": 2820 |
|
}, |
|
{ |
|
"epoch": 17.06103993971364, |
|
"grad_norm": 1.4236109373520875, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0397, |
|
"step": 2830 |
|
}, |
|
{ |
|
"epoch": 17.121326299924643, |
|
"grad_norm": 1.5339925411690258, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0409, |
|
"step": 2840 |
|
}, |
|
{ |
|
"epoch": 17.181612660135645, |
|
"grad_norm": 1.5223509157958206, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0416, |
|
"step": 2850 |
|
}, |
|
{ |
|
"epoch": 17.241899020346647, |
|
"grad_norm": 1.7444832258175693, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0425, |
|
"step": 2860 |
|
}, |
|
{ |
|
"epoch": 17.30218538055765, |
|
"grad_norm": 1.7829806133150694, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0426, |
|
"step": 2870 |
|
}, |
|
{ |
|
"epoch": 17.36247174076865, |
|
"grad_norm": 1.7776363684895005, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0437, |
|
"step": 2880 |
|
}, |
|
{ |
|
"epoch": 17.422758100979653, |
|
"grad_norm": 1.449976018411429, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0434, |
|
"step": 2890 |
|
}, |
|
{ |
|
"epoch": 17.483044461190655, |
|
"grad_norm": 1.4737183761321249, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0443, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 17.543330821401657, |
|
"grad_norm": 1.5570813152529674, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0439, |
|
"step": 2910 |
|
}, |
|
{ |
|
"epoch": 17.60361718161266, |
|
"grad_norm": 1.6635463270221298, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0451, |
|
"step": 2920 |
|
}, |
|
{ |
|
"epoch": 17.66390354182366, |
|
"grad_norm": 1.5633000807042863, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0452, |
|
"step": 2930 |
|
}, |
|
{ |
|
"epoch": 17.724189902034666, |
|
"grad_norm": 1.5756718088358965, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0453, |
|
"step": 2940 |
|
}, |
|
{ |
|
"epoch": 17.78447626224567, |
|
"grad_norm": 1.5086652524908912, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0461, |
|
"step": 2950 |
|
}, |
|
{ |
|
"epoch": 17.84476262245667, |
|
"grad_norm": 1.543593492657027, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0471, |
|
"step": 2960 |
|
}, |
|
{ |
|
"epoch": 17.905048982667672, |
|
"grad_norm": 1.8850354234893694, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0473, |
|
"step": 2970 |
|
}, |
|
{ |
|
"epoch": 17.965335342878674, |
|
"grad_norm": 1.5531948749022353, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0481, |
|
"step": 2980 |
|
}, |
|
{ |
|
"epoch": 17.995478522984175, |
|
"eval_loss": 2.0547046661376953, |
|
"eval_runtime": 131.4004, |
|
"eval_samples_per_second": 34.011, |
|
"eval_steps_per_second": 0.533, |
|
"step": 2985 |
|
}, |
|
{ |
|
"epoch": 18.025621703089676, |
|
"grad_norm": 1.5738867889136725, |
|
"learning_rate": 5e-06, |
|
"loss": 0.041, |
|
"step": 2990 |
|
}, |
|
{ |
|
"epoch": 18.08590806330068, |
|
"grad_norm": 1.7413824765739192, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0319, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 18.14619442351168, |
|
"grad_norm": 1.349257585093222, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0321, |
|
"step": 3010 |
|
}, |
|
{ |
|
"epoch": 18.206480783722682, |
|
"grad_norm": 1.5030527061221075, |
|
"learning_rate": 5e-06, |
|
"loss": 0.033, |
|
"step": 3020 |
|
}, |
|
{ |
|
"epoch": 18.266767143933684, |
|
"grad_norm": 1.3839757017227572, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0338, |
|
"step": 3030 |
|
}, |
|
{ |
|
"epoch": 18.327053504144686, |
|
"grad_norm": 1.3639549234993298, |
|
"learning_rate": 5e-06, |
|
"loss": 0.034, |
|
"step": 3040 |
|
}, |
|
{ |
|
"epoch": 18.38733986435569, |
|
"grad_norm": 1.491804900184454, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0344, |
|
"step": 3050 |
|
}, |
|
{ |
|
"epoch": 18.44762622456669, |
|
"grad_norm": 1.7182953986203733, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0345, |
|
"step": 3060 |
|
}, |
|
{ |
|
"epoch": 18.507912584777696, |
|
"grad_norm": 1.5927159416873042, |
|
"learning_rate": 5e-06, |
|
"loss": 0.035, |
|
"step": 3070 |
|
}, |
|
{ |
|
"epoch": 18.568198944988698, |
|
"grad_norm": 1.48048633519207, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0351, |
|
"step": 3080 |
|
}, |
|
{ |
|
"epoch": 18.6284853051997, |
|
"grad_norm": 1.5724972628180884, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0357, |
|
"step": 3090 |
|
}, |
|
{ |
|
"epoch": 18.6887716654107, |
|
"grad_norm": 1.460368142482016, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0367, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 18.749058025621704, |
|
"grad_norm": 1.5605571994020253, |
|
"learning_rate": 5e-06, |
|
"loss": 0.037, |
|
"step": 3110 |
|
}, |
|
{ |
|
"epoch": 18.809344385832706, |
|
"grad_norm": 1.809142213762153, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0364, |
|
"step": 3120 |
|
}, |
|
{ |
|
"epoch": 18.869630746043708, |
|
"grad_norm": 1.6290220037965493, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0367, |
|
"step": 3130 |
|
}, |
|
{ |
|
"epoch": 18.92991710625471, |
|
"grad_norm": 1.441383521222635, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0373, |
|
"step": 3140 |
|
}, |
|
{ |
|
"epoch": 18.99020346646571, |
|
"grad_norm": 1.4650505575255777, |
|
"learning_rate": 5e-06, |
|
"loss": 0.038, |
|
"step": 3150 |
|
}, |
|
{ |
|
"epoch": 18.99623210248681, |
|
"eval_loss": 2.1350691318511963, |
|
"eval_runtime": 116.4178, |
|
"eval_samples_per_second": 38.388, |
|
"eval_steps_per_second": 0.601, |
|
"step": 3151 |
|
}, |
|
{ |
|
"epoch": 19.050489826676714, |
|
"grad_norm": 1.2364462769526054, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0273, |
|
"step": 3160 |
|
}, |
|
{ |
|
"epoch": 19.110776186887716, |
|
"grad_norm": 1.2847338125252712, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0263, |
|
"step": 3170 |
|
}, |
|
{ |
|
"epoch": 19.171062547098717, |
|
"grad_norm": 1.4364005652974698, |
|
"learning_rate": 5e-06, |
|
"loss": 0.027, |
|
"step": 3180 |
|
}, |
|
{ |
|
"epoch": 19.23134890730972, |
|
"grad_norm": 1.4324790800038558, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0278, |
|
"step": 3190 |
|
}, |
|
{ |
|
"epoch": 19.291635267520725, |
|
"grad_norm": 1.3696161004609035, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0268, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 19.351921627731727, |
|
"grad_norm": 1.4042401515519942, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0276, |
|
"step": 3210 |
|
}, |
|
{ |
|
"epoch": 19.41220798794273, |
|
"grad_norm": 1.4201103499411916, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0282, |
|
"step": 3220 |
|
}, |
|
{ |
|
"epoch": 19.47249434815373, |
|
"grad_norm": 1.4787498619597446, |
|
"learning_rate": 5e-06, |
|
"loss": 0.029, |
|
"step": 3230 |
|
}, |
|
{ |
|
"epoch": 19.532780708364733, |
|
"grad_norm": 1.3042565121452792, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0289, |
|
"step": 3240 |
|
}, |
|
{ |
|
"epoch": 19.593067068575735, |
|
"grad_norm": 1.5616956908222805, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0291, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 19.653353428786737, |
|
"grad_norm": 1.3916636056123413, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0294, |
|
"step": 3260 |
|
}, |
|
{ |
|
"epoch": 19.71363978899774, |
|
"grad_norm": 1.5340949993651216, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0302, |
|
"step": 3270 |
|
}, |
|
{ |
|
"epoch": 19.77392614920874, |
|
"grad_norm": 1.567955217260001, |
|
"learning_rate": 5e-06, |
|
"loss": 0.03, |
|
"step": 3280 |
|
}, |
|
{ |
|
"epoch": 19.834212509419743, |
|
"grad_norm": 1.4614834017683382, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0302, |
|
"step": 3290 |
|
}, |
|
{ |
|
"epoch": 19.894498869630745, |
|
"grad_norm": 1.3384969584915662, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0299, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 19.954785229841747, |
|
"grad_norm": 1.6563798754782657, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0306, |
|
"step": 3310 |
|
}, |
|
{ |
|
"epoch": 19.99698568198945, |
|
"eval_loss": 2.2255122661590576, |
|
"eval_runtime": 131.2328, |
|
"eval_samples_per_second": 34.054, |
|
"eval_steps_per_second": 0.533, |
|
"step": 3317 |
|
}, |
|
{ |
|
"epoch": 20.015071590052752, |
|
"grad_norm": 1.2478793413376605, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0282, |
|
"step": 3320 |
|
}, |
|
{ |
|
"epoch": 20.075357950263754, |
|
"grad_norm": 1.5652614584389117, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0201, |
|
"step": 3330 |
|
}, |
|
{ |
|
"epoch": 20.135644310474756, |
|
"grad_norm": 1.6731787596786802, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0215, |
|
"step": 3340 |
|
}, |
|
{ |
|
"epoch": 20.195930670685758, |
|
"grad_norm": 1.2852656312690158, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0215, |
|
"step": 3350 |
|
}, |
|
{ |
|
"epoch": 20.25621703089676, |
|
"grad_norm": 1.189629660977167, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0218, |
|
"step": 3360 |
|
}, |
|
{ |
|
"epoch": 20.316503391107762, |
|
"grad_norm": 1.280211241755101, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0225, |
|
"step": 3370 |
|
}, |
|
{ |
|
"epoch": 20.376789751318764, |
|
"grad_norm": 1.3671510362477786, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0233, |
|
"step": 3380 |
|
}, |
|
{ |
|
"epoch": 20.437076111529766, |
|
"grad_norm": 1.2479455610052839, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0235, |
|
"step": 3390 |
|
}, |
|
{ |
|
"epoch": 20.497362471740768, |
|
"grad_norm": 1.3487469571257409, |
|
"learning_rate": 5e-06, |
|
"loss": 0.024, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 20.55764883195177, |
|
"grad_norm": 1.296539761041376, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0239, |
|
"step": 3410 |
|
}, |
|
{ |
|
"epoch": 20.617935192162772, |
|
"grad_norm": 1.3688473131734242, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0243, |
|
"step": 3420 |
|
}, |
|
{ |
|
"epoch": 20.678221552373774, |
|
"grad_norm": 1.3764265527684842, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0246, |
|
"step": 3430 |
|
}, |
|
{ |
|
"epoch": 20.738507912584776, |
|
"grad_norm": 1.5833602766458634, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0249, |
|
"step": 3440 |
|
}, |
|
{ |
|
"epoch": 20.79879427279578, |
|
"grad_norm": 1.528464092769155, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0258, |
|
"step": 3450 |
|
}, |
|
{ |
|
"epoch": 20.859080633006784, |
|
"grad_norm": 1.605313026206358, |
|
"learning_rate": 5e-06, |
|
"loss": 0.026, |
|
"step": 3460 |
|
}, |
|
{ |
|
"epoch": 20.919366993217785, |
|
"grad_norm": 1.3923344492334147, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0259, |
|
"step": 3470 |
|
}, |
|
{ |
|
"epoch": 20.979653353428787, |
|
"grad_norm": 1.4204063096393778, |
|
"learning_rate": 5e-06, |
|
"loss": 0.0256, |
|
"step": 3480 |
|
}, |
|
{ |
|
"epoch": 20.997739261492086, |
|
"eval_loss": 2.269946813583374, |
|
"eval_runtime": 116.5456, |
|
"eval_samples_per_second": 38.345, |
|
"eval_steps_per_second": 0.601, |
|
"step": 3483 |
|
}, |
|
    {
      "epoch": 21.03993971363979,
      "grad_norm": 1.310936775235027,
      "learning_rate": 5e-06,
      "loss": 0.0208,
      "step": 3490
    },
    {
      "epoch": 21.10022607385079,
      "grad_norm": 1.1547005623677518,
      "learning_rate": 5e-06,
      "loss": 0.0186,
      "step": 3500
    },
    {
      "epoch": 21.160512434061793,
      "grad_norm": 1.2938934451769595,
      "learning_rate": 5e-06,
      "loss": 0.0194,
      "step": 3510
    },
    {
      "epoch": 21.220798794272795,
      "grad_norm": 1.484320867371485,
      "learning_rate": 5e-06,
      "loss": 0.0196,
      "step": 3520
    },
    {
      "epoch": 21.281085154483797,
      "grad_norm": 1.4005288463687333,
      "learning_rate": 5e-06,
      "loss": 0.02,
      "step": 3530
    },
    {
      "epoch": 21.3413715146948,
      "grad_norm": 1.6013274204687469,
      "learning_rate": 5e-06,
      "loss": 0.0201,
      "step": 3540
    },
    {
      "epoch": 21.4016578749058,
      "grad_norm": 1.4800630752519932,
      "learning_rate": 5e-06,
      "loss": 0.02,
      "step": 3550
    },
    {
      "epoch": 21.461944235116803,
      "grad_norm": 1.497381379010042,
      "learning_rate": 5e-06,
      "loss": 0.0203,
      "step": 3560
    },
    {
      "epoch": 21.52223059532781,
      "grad_norm": 1.2261010629715117,
      "learning_rate": 5e-06,
      "loss": 0.0208,
      "step": 3570
    },
    {
      "epoch": 21.58251695553881,
      "grad_norm": 1.353310824251755,
      "learning_rate": 5e-06,
      "loss": 0.0211,
      "step": 3580
    },
    {
      "epoch": 21.642803315749813,
      "grad_norm": 1.2550244372657888,
      "learning_rate": 5e-06,
      "loss": 0.0217,
      "step": 3590
    },
    {
      "epoch": 21.703089675960815,
      "grad_norm": 1.3870144386972025,
      "learning_rate": 5e-06,
      "loss": 0.0218,
      "step": 3600
    },
    {
      "epoch": 21.763376036171817,
      "grad_norm": 1.2996006226877992,
      "learning_rate": 5e-06,
      "loss": 0.0217,
      "step": 3610
    },
    {
      "epoch": 21.82366239638282,
      "grad_norm": 1.3533095992501387,
      "learning_rate": 5e-06,
      "loss": 0.0216,
      "step": 3620
    },
    {
      "epoch": 21.88394875659382,
      "grad_norm": 1.3112078284304194,
      "learning_rate": 5e-06,
      "loss": 0.0217,
      "step": 3630
    },
    {
      "epoch": 21.944235116804823,
      "grad_norm": 1.3288506152434172,
      "learning_rate": 5e-06,
      "loss": 0.0221,
      "step": 3640
    },
    {
      "epoch": 21.998492840994725,
      "eval_loss": 2.351545572280884,
      "eval_runtime": 116.2788,
      "eval_samples_per_second": 38.433,
      "eval_steps_per_second": 0.602,
      "step": 3649
    },
    {
      "epoch": 22.004521477015825,
      "grad_norm": 1.0649685278407737,
      "learning_rate": 5e-06,
      "loss": 0.0218,
      "step": 3650
    },
    {
      "epoch": 22.064807837226827,
      "grad_norm": 1.2396804319512318,
      "learning_rate": 5e-06,
      "loss": 0.0158,
      "step": 3660
    },
    {
      "epoch": 22.12509419743783,
      "grad_norm": 1.2203455145168154,
      "learning_rate": 5e-06,
      "loss": 0.0163,
      "step": 3670
    },
    {
      "epoch": 22.18538055764883,
      "grad_norm": 1.1547196338255674,
      "learning_rate": 5e-06,
      "loss": 0.0169,
      "step": 3680
    },
    {
      "epoch": 22.245666917859833,
      "grad_norm": 1.309239398813011,
      "learning_rate": 5e-06,
      "loss": 0.0173,
      "step": 3690
    },
    {
      "epoch": 22.305953278070838,
      "grad_norm": 1.2683908516455489,
      "learning_rate": 5e-06,
      "loss": 0.0173,
      "step": 3700
    },
    {
      "epoch": 22.36623963828184,
      "grad_norm": 1.5982780924133835,
      "learning_rate": 5e-06,
      "loss": 0.0183,
      "step": 3710
    },
    {
      "epoch": 22.426525998492842,
      "grad_norm": 1.4555126747204115,
      "learning_rate": 5e-06,
      "loss": 0.0184,
      "step": 3720
    },
    {
      "epoch": 22.486812358703844,
      "grad_norm": 1.3675517314579124,
      "learning_rate": 5e-06,
      "loss": 0.0186,
      "step": 3730
    },
    {
      "epoch": 22.547098718914846,
      "grad_norm": 1.2088813887554757,
      "learning_rate": 5e-06,
      "loss": 0.0188,
      "step": 3740
    },
    {
      "epoch": 22.607385079125848,
      "grad_norm": 1.157896013440299,
      "learning_rate": 5e-06,
      "loss": 0.0194,
      "step": 3750
    },
    {
      "epoch": 22.66767143933685,
      "grad_norm": 1.4152388376390785,
      "learning_rate": 5e-06,
      "loss": 0.0189,
      "step": 3760
    },
    {
      "epoch": 22.727957799547852,
      "grad_norm": 1.2554294042934306,
      "learning_rate": 5e-06,
      "loss": 0.019,
      "step": 3770
    },
    {
      "epoch": 22.788244159758854,
      "grad_norm": 1.3329931616963409,
      "learning_rate": 5e-06,
      "loss": 0.019,
      "step": 3780
    },
    {
      "epoch": 22.848530519969856,
      "grad_norm": 1.256565715756104,
      "learning_rate": 5e-06,
      "loss": 0.0189,
      "step": 3790
    },
    {
      "epoch": 22.908816880180858,
      "grad_norm": 1.368011051794652,
      "learning_rate": 5e-06,
      "loss": 0.0195,
      "step": 3800
    },
    {
      "epoch": 22.96910324039186,
      "grad_norm": 1.4311223887923525,
      "learning_rate": 5e-06,
      "loss": 0.0197,
      "step": 3810
    },
    {
      "epoch": 22.99924642049736,
      "eval_loss": 2.359902858734131,
      "eval_runtime": 117.0325,
      "eval_samples_per_second": 38.186,
      "eval_steps_per_second": 0.598,
      "step": 3815
    },
    {
      "epoch": 23.029389600602865,
      "grad_norm": 1.114496963570823,
      "learning_rate": 5e-06,
      "loss": 0.0171,
      "step": 3820
    },
    {
      "epoch": 23.089675960813867,
      "grad_norm": 1.1656429989984474,
      "learning_rate": 5e-06,
      "loss": 0.0149,
      "step": 3830
    },
    {
      "epoch": 23.14996232102487,
      "grad_norm": 1.3181375592191023,
      "learning_rate": 5e-06,
      "loss": 0.0152,
      "step": 3840
    },
    {
      "epoch": 23.21024868123587,
      "grad_norm": 1.214891422163979,
      "learning_rate": 5e-06,
      "loss": 0.0157,
      "step": 3850
    },
    {
      "epoch": 23.270535041446873,
      "grad_norm": 1.1329477739782727,
      "learning_rate": 5e-06,
      "loss": 0.0159,
      "step": 3860
    },
    {
      "epoch": 23.330821401657875,
      "grad_norm": 1.178978239439168,
      "learning_rate": 5e-06,
      "loss": 0.0163,
      "step": 3870
    },
    {
      "epoch": 23.391107761868877,
      "grad_norm": 1.1072042006237766,
      "learning_rate": 5e-06,
      "loss": 0.0165,
      "step": 3880
    },
    {
      "epoch": 23.45139412207988,
      "grad_norm": 1.2281267859101168,
      "learning_rate": 5e-06,
      "loss": 0.0168,
      "step": 3890
    },
    {
      "epoch": 23.51168048229088,
      "grad_norm": 1.1558899107856422,
      "learning_rate": 5e-06,
      "loss": 0.0174,
      "step": 3900
    },
    {
      "epoch": 23.571966842501883,
      "grad_norm": 1.217612194947995,
      "learning_rate": 5e-06,
      "loss": 0.0173,
      "step": 3910
    },
    {
      "epoch": 23.632253202712885,
      "grad_norm": 1.2440180440988633,
      "learning_rate": 5e-06,
      "loss": 0.0174,
      "step": 3920
    },
    {
      "epoch": 23.692539562923887,
      "grad_norm": 1.3420865819465875,
      "learning_rate": 5e-06,
      "loss": 0.0174,
      "step": 3930
    },
    {
      "epoch": 23.75282592313489,
      "grad_norm": 1.3593190067484822,
      "learning_rate": 5e-06,
      "loss": 0.018,
      "step": 3940
    },
    {
      "epoch": 23.813112283345895,
      "grad_norm": 1.2367975594859635,
      "learning_rate": 5e-06,
      "loss": 0.0177,
      "step": 3950
    },
    {
      "epoch": 23.873398643556897,
      "grad_norm": 1.2214254875430963,
      "learning_rate": 5e-06,
      "loss": 0.0179,
      "step": 3960
    },
    {
      "epoch": 23.9336850037679,
      "grad_norm": 1.20689456847013,
      "learning_rate": 5e-06,
      "loss": 0.018,
      "step": 3970
    },
    {
      "epoch": 23.9939713639789,
      "grad_norm": 1.2666923485205688,
      "learning_rate": 5e-06,
      "loss": 0.0186,
      "step": 3980
    },
    {
      "epoch": 24.0,
      "eval_loss": 2.3888232707977295,
      "eval_runtime": 116.343,
      "eval_samples_per_second": 38.412,
      "eval_steps_per_second": 0.602,
      "step": 3981
    },
    {
      "epoch": 24.054257724189902,
      "grad_norm": 1.1066074003233175,
      "learning_rate": 5e-06,
      "loss": 0.0141,
      "step": 3990
    },
    {
      "epoch": 24.114544084400904,
      "grad_norm": 1.0389652033545478,
      "learning_rate": 5e-06,
      "loss": 0.0141,
      "step": 4000
    },
    {
      "epoch": 24.174830444611906,
      "grad_norm": 1.1258379344398495,
      "learning_rate": 5e-06,
      "loss": 0.0144,
      "step": 4010
    },
    {
      "epoch": 24.23511680482291,
      "grad_norm": 1.1758249329629795,
      "learning_rate": 5e-06,
      "loss": 0.0148,
      "step": 4020
    },
    {
      "epoch": 24.29540316503391,
      "grad_norm": 1.1741885490501298,
      "learning_rate": 5e-06,
      "loss": 0.0151,
      "step": 4030
    },
    {
      "epoch": 24.355689525244912,
      "grad_norm": 1.1622125971160786,
      "learning_rate": 5e-06,
      "loss": 0.0156,
      "step": 4040
    },
    {
      "epoch": 24.415975885455914,
      "grad_norm": 1.0899820074898727,
      "learning_rate": 5e-06,
      "loss": 0.0154,
      "step": 4050
    },
    {
      "epoch": 24.476262245666916,
      "grad_norm": 1.2599823712922462,
      "learning_rate": 5e-06,
      "loss": 0.0159,
      "step": 4060
    },
    {
      "epoch": 24.53654860587792,
      "grad_norm": 1.140178001121795,
      "learning_rate": 5e-06,
      "loss": 0.0157,
      "step": 4070
    },
    {
      "epoch": 24.596834966088924,
      "grad_norm": 1.1230288891139422,
      "learning_rate": 5e-06,
      "loss": 0.0158,
      "step": 4080
    },
    {
      "epoch": 24.657121326299926,
      "grad_norm": 1.1218784125193524,
      "learning_rate": 5e-06,
      "loss": 0.0163,
      "step": 4090
    },
    {
      "epoch": 24.717407686510928,
      "grad_norm": 1.1524016261830858,
      "learning_rate": 5e-06,
      "loss": 0.0164,
      "step": 4100
    },
    {
      "epoch": 24.77769404672193,
      "grad_norm": 1.15414666614049,
      "learning_rate": 5e-06,
      "loss": 0.0166,
      "step": 4110
    },
    {
      "epoch": 24.83798040693293,
      "grad_norm": 1.3244431296269448,
      "learning_rate": 5e-06,
      "loss": 0.0169,
      "step": 4120
    },
    {
      "epoch": 24.868123587038433,
      "eval_loss": 2.4057514667510986,
      "eval_runtime": 113.5854,
      "eval_samples_per_second": 39.345,
      "eval_steps_per_second": 0.616,
      "step": 4125
    },
    {
      "epoch": 24.868123587038433,
      "step": 4125,
      "total_flos": 6909319257784320.0,
      "train_loss": 0.2661740447047985,
      "train_runtime": 139979.5045,
      "train_samples_per_second": 15.162,
      "train_steps_per_second": 0.029
    }
  ],
"logging_steps": 10, |
|
"max_steps": 4125, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 25, |
|
"save_steps": 500, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 6909319257784320.0, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|