{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 200,
  "global_step": 1086,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.0009208103130755065, "grad_norm": 0.7104283979721944, "learning_rate": 1.8348623853211011e-06, "loss": 1.2184, "step": 1},
    {"epoch": 0.004604051565377533, "grad_norm": 0.8484084058142851, "learning_rate": 9.174311926605506e-06, "loss": 1.2345, "step": 5},
    {"epoch": 0.009208103130755065, "grad_norm": 0.8895650483641482, "learning_rate": 1.834862385321101e-05, "loss": 1.2996, "step": 10},
    {"epoch": 0.013812154696132596, "grad_norm": 0.6693826221356033, "learning_rate": 2.7522935779816515e-05, "loss": 1.2409, "step": 15},
    {"epoch": 0.01841620626151013, "grad_norm": 0.9775039809987707, "learning_rate": 3.669724770642202e-05, "loss": 1.078, "step": 20},
    {"epoch": 0.02302025782688766, "grad_norm": 0.42561125364219876, "learning_rate": 4.587155963302753e-05, "loss": 0.9796, "step": 25},
    {"epoch": 0.027624309392265192, "grad_norm": 0.20515777384298603, "learning_rate": 5.504587155963303e-05, "loss": 0.8728, "step": 30},
    {"epoch": 0.03222836095764273, "grad_norm": 0.21277732125436763, "learning_rate": 6.422018348623854e-05, "loss": 0.8742, "step": 35},
    {"epoch": 0.03683241252302026, "grad_norm": 0.2807596133606811, "learning_rate": 7.339449541284404e-05, "loss": 0.8226, "step": 40},
    {"epoch": 0.04143646408839779, "grad_norm": 0.2462315157718494, "learning_rate": 8.256880733944955e-05, "loss": 0.8238, "step": 45},
    {"epoch": 0.04604051565377532, "grad_norm": 0.1796423907240695, "learning_rate": 9.174311926605506e-05, "loss": 0.8043, "step": 50},
    {"epoch": 0.05064456721915286, "grad_norm": 0.16102348645765954, "learning_rate": 0.00010091743119266055, "loss": 0.7908, "step": 55},
    {"epoch": 0.055248618784530384, "grad_norm": 0.1468812473488326, "learning_rate": 0.00011009174311926606, "loss": 0.7798, "step": 60},
    {"epoch": 0.05985267034990792, "grad_norm": 0.15243943821399936, "learning_rate": 0.00011926605504587157, "loss": 0.7939, "step": 65},
    {"epoch": 0.06445672191528545, "grad_norm": 0.16911773454140264, "learning_rate": 0.00012844036697247707, "loss": 0.7692, "step": 70},
    {"epoch": 0.06906077348066299, "grad_norm": 0.14029887618581424, "learning_rate": 0.00013761467889908258, "loss": 0.7702, "step": 75},
    {"epoch": 0.07366482504604052, "grad_norm": 0.15991589862519767, "learning_rate": 0.0001467889908256881, "loss": 0.7801, "step": 80},
    {"epoch": 0.07826887661141804, "grad_norm": 0.16836933389270248, "learning_rate": 0.0001559633027522936, "loss": 0.7493, "step": 85},
    {"epoch": 0.08287292817679558, "grad_norm": 0.17161343155107908, "learning_rate": 0.0001651376146788991, "loss": 0.752, "step": 90},
    {"epoch": 0.08747697974217311, "grad_norm": 0.17139003055306476, "learning_rate": 0.00017431192660550458, "loss": 0.7504, "step": 95},
    {"epoch": 0.09208103130755065, "grad_norm": 0.14118381221233112, "learning_rate": 0.00018348623853211012, "loss": 0.7474, "step": 100},
    {"epoch": 0.09668508287292818, "grad_norm": 0.15274245520687557, "learning_rate": 0.0001926605504587156, "loss": 0.7153, "step": 105},
    {"epoch": 0.10128913443830571, "grad_norm": 0.17243376201491117, "learning_rate": 0.00019999948301225543, "loss": 0.7418, "step": 110},
    {"epoch": 0.10589318600368323, "grad_norm": 0.15501917072069255, "learning_rate": 0.00019998138900246913, "loss": 0.7471, "step": 115},
    {"epoch": 0.11049723756906077, "grad_norm": 0.1721566405839237, "learning_rate": 0.00019993745095073183, "loss": 0.7453, "step": 120},
    {"epoch": 0.1151012891344383, "grad_norm": 0.13679964856454155, "learning_rate": 0.00019986768021452574, "loss": 0.7228, "step": 125},
    {"epoch": 0.11970534069981584, "grad_norm": 0.14703324544205662, "learning_rate": 0.00019977209482878574, "loss": 0.7124, "step": 130},
    {"epoch": 0.12430939226519337, "grad_norm": 0.18865400963921064, "learning_rate": 0.00019965071950123732, "loss": 0.7536, "step": 135},
    {"epoch": 0.1289134438305709, "grad_norm": 0.1474544520231936, "learning_rate": 0.00019950358560601013, "loss": 0.7429, "step": 140},
    {"epoch": 0.13351749539594843, "grad_norm": 0.13523126598394036, "learning_rate": 0.00019933073117552795, "loss": 0.7267, "step": 145},
    {"epoch": 0.13812154696132597, "grad_norm": 0.146238565087472, "learning_rate": 0.00019913220089067793, "loss": 0.7749, "step": 150},
    {"epoch": 0.1427255985267035, "grad_norm": 0.1550423764417148, "learning_rate": 0.00019890804606926087, "loss": 0.7329, "step": 155},
    {"epoch": 0.14732965009208104, "grad_norm": 0.1591980721619053, "learning_rate": 0.00019865832465272635, "loss": 0.7534, "step": 160},
    {"epoch": 0.15193370165745856, "grad_norm": 0.1531186776106742, "learning_rate": 0.00019838310119119545, "loss": 0.756, "step": 165},
    {"epoch": 0.15653775322283608, "grad_norm": 0.1476816081597602, "learning_rate": 0.0001980824468267753, "loss": 0.7489, "step": 170},
    {"epoch": 0.16114180478821363, "grad_norm": 0.14795132619266863, "learning_rate": 0.00019775643927516954, "loss": 0.7209, "step": 175},
    {"epoch": 0.16574585635359115, "grad_norm": 0.14679273124596579, "learning_rate": 0.00019740516280559005, "loss": 0.7296, "step": 180},
    {"epoch": 0.1703499079189687, "grad_norm": 0.14120244970365975, "learning_rate": 0.00019702870821897386, "loss": 0.7512, "step": 185},
    {"epoch": 0.17495395948434622, "grad_norm": 0.1399532146516525, "learning_rate": 0.00019662717282451249, "loss": 0.7091, "step": 190},
    {"epoch": 0.17955801104972377, "grad_norm": 0.1472244949078579, "learning_rate": 0.00019620066041449853, "loss": 0.7235, "step": 195},
    {"epoch": 0.1841620626151013, "grad_norm": 0.13348485928094997, "learning_rate": 0.0001957492812374965, "loss": 0.7109, "step": 200},
    {"epoch": 0.1841620626151013, "eval_loss": 0.7064515352249146, "eval_runtime": 48.9511, "eval_samples_per_second": 10.214, "eval_steps_per_second": 0.654, "step": 200},
    {"epoch": 0.1887661141804788, "grad_norm": 0.12975499369978807, "learning_rate": 0.00019527315196984487, "loss": 0.7103, "step": 205},
    {"epoch": 0.19337016574585636, "grad_norm": 0.1309790778507949, "learning_rate": 0.00019477239568549667, "loss": 0.7166, "step": 210},
    {"epoch": 0.19797421731123388, "grad_norm": 0.13709065108741178, "learning_rate": 0.00019424714182420604, "loss": 0.7043, "step": 215},
    {"epoch": 0.20257826887661143, "grad_norm": 0.13342648155684383, "learning_rate": 0.00019369752615806988, "loss": 0.7136, "step": 220},
    {"epoch": 0.20718232044198895, "grad_norm": 0.13844474906898738, "learning_rate": 0.00019312369075643194, "loss": 0.7207, "step": 225},
    {"epoch": 0.21178637200736647, "grad_norm": 0.14186684102196156, "learning_rate": 0.00019252578394915988, "loss": 0.7255, "step": 230},
    {"epoch": 0.21639042357274402, "grad_norm": 0.15866211011849904, "learning_rate": 0.0001919039602883035, "loss": 0.7223, "step": 235},
    {"epoch": 0.22099447513812154, "grad_norm": 0.1314674917628441, "learning_rate": 0.00019125838050814472, "loss": 0.7228, "step": 240},
    {"epoch": 0.22559852670349909, "grad_norm": 0.14491106611651047, "learning_rate": 0.00019058921148364994, "loss": 0.7261, "step": 245},
    {"epoch": 0.2302025782688766, "grad_norm": 0.12821922589500195, "learning_rate": 0.00018989662618733446, "loss": 0.7071, "step": 250},
    {"epoch": 0.23480662983425415, "grad_norm": 0.14571212342587975, "learning_rate": 0.00018918080364455122, "loss": 0.7016, "step": 255},
    {"epoch": 0.23941068139963168, "grad_norm": 0.13772160137757872, "learning_rate": 0.0001884419288872147, "loss": 0.7069, "step": 260},
    {"epoch": 0.2440147329650092, "grad_norm": 0.1423677034382106, "learning_rate": 0.00018768019290597252, "loss": 0.7294, "step": 265},
    {"epoch": 0.24861878453038674, "grad_norm": 0.12203724600652281, "learning_rate": 0.0001868957926008362, "loss": 0.7168, "step": 270},
    {"epoch": 0.2532228360957643, "grad_norm": 0.1329347877570805, "learning_rate": 0.00018608893073028505, "loss": 0.7026, "step": 275},
    {"epoch": 0.2578268876611418, "grad_norm": 0.13186384109645674, "learning_rate": 0.00018525981585885536, "loss": 0.7082, "step": 280},
    {"epoch": 0.26243093922651933, "grad_norm": 0.14256872075438773, "learning_rate": 0.00018440866230322878, "loss": 0.6917, "step": 285},
    {"epoch": 0.26703499079189685, "grad_norm": 0.14428503648361754, "learning_rate": 0.00018353569007683394, "loss": 0.719, "step": 290},
    {"epoch": 0.2716390423572744, "grad_norm": 0.1820352962113873, "learning_rate": 0.00018264112483297565, "loss": 0.7264, "step": 295},
    {"epoch": 0.27624309392265195, "grad_norm": 0.15797994447204652, "learning_rate": 0.00018172519780650594, "loss": 0.7195, "step": 300},
    {"epoch": 0.28084714548802947, "grad_norm": 0.13313017408023753, "learning_rate": 0.00018078814575405244, "loss": 0.7425, "step": 305},
    {"epoch": 0.285451197053407, "grad_norm": 0.13520681133489493, "learning_rate": 0.00017983021089281983, "loss": 0.707, "step": 310},
    {"epoch": 0.2900552486187845, "grad_norm": 0.12245549217896762, "learning_rate": 0.0001788516408379791, "loss": 0.7154, "step": 315},
    {"epoch": 0.2946593001841621, "grad_norm": 0.12748362531526, "learning_rate": 0.0001778526885386621, "loss": 0.7197, "step": 320},
    {"epoch": 0.2992633517495396, "grad_norm": 0.11117223964124032, "learning_rate": 0.00017683361221257707, "loss": 0.6973, "step": 325},
    {"epoch": 0.30386740331491713, "grad_norm": 0.1289936719815313, "learning_rate": 0.0001757946752792622, "loss": 0.7247, "step": 330},
    {"epoch": 0.30847145488029465, "grad_norm": 0.16488859196213657, "learning_rate": 0.00017473614629199486, "loss": 0.7286, "step": 335},
    {"epoch": 0.31307550644567217, "grad_norm": 0.14470083977942239, "learning_rate": 0.00017365829886837353, "loss": 0.7343, "step": 340},
    {"epoch": 0.31767955801104975, "grad_norm": 0.13447366414444684, "learning_rate": 0.00017256141161959085, "loss": 0.7255, "step": 345},
    {"epoch": 0.32228360957642727, "grad_norm": 0.1217662376207493, "learning_rate": 0.00017144576807841583, "loss": 0.724, "step": 350},
    {"epoch": 0.3268876611418048, "grad_norm": 0.15300308488915879, "learning_rate": 0.00017031165662590388, "loss": 0.7267, "step": 355},
    {"epoch": 0.3314917127071823, "grad_norm": 0.13907462570860107, "learning_rate": 0.0001691593704168536, "loss": 0.748, "step": 360},
    {"epoch": 0.3360957642725598, "grad_norm": 0.13780875633005546, "learning_rate": 0.0001679892073040296, "loss": 0.7262, "step": 365},
    {"epoch": 0.3406998158379374, "grad_norm": 0.1305755908910725, "learning_rate": 0.00016680146976117105, "loss": 0.7155, "step": 370},
    {"epoch": 0.3453038674033149, "grad_norm": 0.14568215090440456, "learning_rate": 0.00016559646480480562, "loss": 0.7223, "step": 375},
    {"epoch": 0.34990791896869244, "grad_norm": 0.12857349185078817, "learning_rate": 0.00016437450391488926, "loss": 0.7336, "step": 380},
    {"epoch": 0.35451197053406996, "grad_norm": 0.11784320830005238, "learning_rate": 0.00016313590295429224, "loss": 0.6889, "step": 385},
    {"epoch": 0.35911602209944754, "grad_norm": 0.13646526167574166, "learning_rate": 0.00016188098208715218, "loss": 0.7192, "step": 390},
    {"epoch": 0.36372007366482506, "grad_norm": 0.12175101806984481, "learning_rate": 0.00016061006569611524, "loss": 0.6836, "step": 395},
    {"epoch": 0.3683241252302026, "grad_norm": 0.13758905451063172, "learning_rate": 0.00015932348229848702, "loss": 0.7185, "step": 400},
    {"epoch": 0.3683241252302026, "eval_loss": 0.6904310584068298, "eval_runtime": 47.8076, "eval_samples_per_second": 10.459, "eval_steps_per_second": 0.669, "step": 400},
    {"epoch": 0.3729281767955801, "grad_norm": 0.11922226680398379, "learning_rate": 0.00015802156446131463, "loss": 0.7108, "step": 405},
    {"epoch": 0.3775322283609576, "grad_norm": 0.12914369421847746, "learning_rate": 0.0001567046487154218, "loss": 0.7297, "step": 410},
    {"epoch": 0.3821362799263352, "grad_norm": 0.163588935842005, "learning_rate": 0.0001553730754684196, "loss": 0.7286, "step": 415},
    {"epoch": 0.3867403314917127, "grad_norm": 0.1305481480361055, "learning_rate": 0.000154027188916715, "loss": 0.7083, "step": 420},
    {"epoch": 0.39134438305709024, "grad_norm": 0.13075746937102734, "learning_rate": 0.00015266733695653998, "loss": 0.7022, "step": 425},
    {"epoch": 0.39594843462246776, "grad_norm": 0.13867395016440975, "learning_rate": 0.0001512938710940244, "loss": 0.6883, "step": 430},
    {"epoch": 0.4005524861878453, "grad_norm": 0.1468540519744832, "learning_rate": 0.00014990714635433567, "loss": 0.724, "step": 435},
    {"epoch": 0.40515653775322286, "grad_norm": 0.1306098517144093, "learning_rate": 0.00014850752118990884, "loss": 0.7063, "step": 440},
    {"epoch": 0.4097605893186004, "grad_norm": 0.1424067979705426, "learning_rate": 0.00014709535738779075, "loss": 0.7402, "step": 445},
    {"epoch": 0.4143646408839779, "grad_norm": 0.1407997721241163, "learning_rate": 0.00014567101997612215, "loss": 0.7006, "step": 450},
    {"epoch": 0.4189686924493554, "grad_norm": 0.14729031365921527, "learning_rate": 0.00014423487712978238, "loss": 0.7024, "step": 455},
    {"epoch": 0.42357274401473294, "grad_norm": 0.13260028024156117, "learning_rate": 0.00014278730007521998, "loss": 0.7173, "step": 460},
    {"epoch": 0.4281767955801105, "grad_norm": 0.1335675383386336, "learning_rate": 0.00014132866299449524, "loss": 0.7289, "step": 465},
    {"epoch": 0.43278084714548803, "grad_norm": 0.14637775224173455, "learning_rate": 0.00013985934292855826, "loss": 0.7005, "step": 470},
    {"epoch": 0.43738489871086556, "grad_norm": 0.1502047175694278, "learning_rate": 0.00013837971967978836, "loss": 0.7004, "step": 475},
    {"epoch": 0.4419889502762431, "grad_norm": 0.14633484532534424, "learning_rate": 0.00013689017571381928, "loss": 0.7288, "step": 480},
    {"epoch": 0.44659300184162065, "grad_norm": 0.13826365436143537, "learning_rate": 0.00013539109606067643, "loss": 0.7085, "step": 485},
    {"epoch": 0.45119705340699817, "grad_norm": 0.14385098441490782, "learning_rate": 0.00013388286821525085, "loss": 0.6974, "step": 490},
    {"epoch": 0.4558011049723757, "grad_norm": 0.13011880817111474, "learning_rate": 0.00013236588203713646, "loss": 0.7041, "step": 495},
    {"epoch": 0.4604051565377532, "grad_norm": 0.12462262006306969, "learning_rate": 0.0001308405296498556, "loss": 0.6962, "step": 500},
    {"epoch": 0.46500920810313073, "grad_norm": 0.12244444903136163, "learning_rate": 0.00012930720533949966, "loss": 0.6999, "step": 505},
    {"epoch": 0.4696132596685083, "grad_norm": 0.1339305914859919, "learning_rate": 0.00012776630545281088, "loss": 0.6806, "step": 510},
    {"epoch": 0.47421731123388583, "grad_norm": 0.13980664973687393, "learning_rate": 0.00012621822829473074, "loss": 0.7394, "step": 515},
    {"epoch": 0.47882136279926335, "grad_norm": 0.16365863143826853, "learning_rate": 0.00012466337402544333, "loss": 0.7279, "step": 520},
    {"epoch": 0.48342541436464087, "grad_norm": 0.1335430922734083, "learning_rate": 0.00012310214455693808, "loss": 0.7034, "step": 525},
    {"epoch": 0.4880294659300184, "grad_norm": 0.14433559304424956, "learning_rate": 0.0001215349434491203, "loss": 0.6937, "step": 530},
    {"epoch": 0.49263351749539597, "grad_norm": 0.132024243038777, "learning_rate": 0.00011996217580549556, "loss": 0.6986, "step": 535},
    {"epoch": 0.4972375690607735, "grad_norm": 0.13633158247507002, "learning_rate": 0.00011838424816845506, "loss": 0.7044, "step": 540},
    {"epoch": 0.501841620626151, "grad_norm": 0.12135498656651347, "learning_rate": 0.0001168015684141891, "loss": 0.7231, "step": 545},
    {"epoch": 0.5064456721915286, "grad_norm": 0.1381176811216971, "learning_rate": 0.0001152145456472558, "loss": 0.6942, "step": 550},
    {"epoch": 0.511049723756906, "grad_norm": 0.14462125066523807, "learning_rate": 0.00011362359009483212, "loss": 0.7, "step": 555},
    {"epoch": 0.5156537753222836, "grad_norm": 0.1401876774896849, "learning_rate": 0.00011202911300067507, "loss": 0.6917, "step": 560},
    {"epoch": 0.5202578268876611, "grad_norm": 0.12761995189511605, "learning_rate": 0.00011043152651881971, "loss": 0.6913, "step": 565},
    {"epoch": 0.5248618784530387, "grad_norm": 0.1309083720987428, "learning_rate": 0.0001088312436070422, "loss": 0.7039, "step": 570},
    {"epoch": 0.5294659300184162, "grad_norm": 0.13969770628383765, "learning_rate": 0.00010722867792011486, "loss": 0.7138, "step": 575},
    {"epoch": 0.5340699815837937, "grad_norm": 0.12065461537026531, "learning_rate": 0.0001056242437028812, "loss": 0.7012, "step": 580},
    {"epoch": 0.5386740331491713, "grad_norm": 0.1403328333527622, "learning_rate": 0.00010401835568317842, "loss": 0.7107, "step": 585},
    {"epoch": 0.5432780847145487, "grad_norm": 0.1524065503450793, "learning_rate": 0.00010241142896463491, "loss": 0.7253, "step": 590},
    {"epoch": 0.5478821362799263, "grad_norm": 0.1459506865219953, "learning_rate": 0.00010080387891937084, "loss": 0.7164, "step": 595},
    {"epoch": 0.5524861878453039, "grad_norm": 0.13249474415125184, "learning_rate": 9.91961210806292e-05, "loss": 0.7062, "step": 600},
    {"epoch": 0.5524861878453039, "eval_loss": 0.681869387626648, "eval_runtime": 47.8405, "eval_samples_per_second": 10.451, "eval_steps_per_second": 0.669, "step": 600},
    {"epoch": 0.5570902394106814, "grad_norm": 0.12788937438752834, "learning_rate": 9.758857103536513e-05, "loss": 0.7099, "step": 605},
    {"epoch": 0.5616942909760589, "grad_norm": 0.14086950862534534, "learning_rate": 9.59816443168216e-05, "loss": 0.6991, "step": 610},
    {"epoch": 0.5662983425414365, "grad_norm": 0.12626280805151868, "learning_rate": 9.437575629711882e-05, "loss": 0.7099, "step": 615},
    {"epoch": 0.570902394106814, "grad_norm": 0.14750922735308492, "learning_rate": 9.277132207988517e-05, "loss": 0.7116, "step": 620},
    {"epoch": 0.5755064456721916, "grad_norm": 0.12947957016862066, "learning_rate": 9.116875639295782e-05, "loss": 0.7173, "step": 625},
    {"epoch": 0.580110497237569, "grad_norm": 0.1217085102913984, "learning_rate": 8.95684734811803e-05, "loss": 0.6782, "step": 630},
    {"epoch": 0.5847145488029466, "grad_norm": 0.12805246153113478, "learning_rate": 8.797088699932494e-05, "loss": 0.6981, "step": 635},
    {"epoch": 0.5893186003683242, "grad_norm": 0.13677967208339983, "learning_rate": 8.637640990516792e-05, "loss": 0.725, "step": 640},
    {"epoch": 0.5939226519337016, "grad_norm": 0.1348897795554575, "learning_rate": 8.478545435274423e-05, "loss": 0.7403, "step": 645},
    {"epoch": 0.5985267034990792, "grad_norm": 0.1425142278541099, "learning_rate": 8.319843158581092e-05, "loss": 0.7524, "step": 650},
    {"epoch": 0.6031307550644567, "grad_norm": 0.1371048777547919, "learning_rate": 8.161575183154495e-05, "loss": 0.7075, "step": 655},
    {"epoch": 0.6077348066298343, "grad_norm": 0.1445081769660327, "learning_rate": 8.003782419450447e-05, "loss": 0.6835, "step": 660},
    {"epoch": 0.6123388581952118, "grad_norm": 0.11760848785277568, "learning_rate": 7.846505655087972e-05, "loss": 0.7081, "step": 665},
    {"epoch": 0.6169429097605893, "grad_norm": 0.1438799054992206, "learning_rate": 7.689785544306193e-05, "loss": 0.7187, "step": 670},
    {"epoch": 0.6215469613259669, "grad_norm": 0.14002441000957835, "learning_rate": 7.533662597455667e-05, "loss": 0.7009, "step": 675},
    {"epoch": 0.6261510128913443, "grad_norm": 0.14656402716158887, "learning_rate": 7.378177170526927e-05, "loss": 0.6918, "step": 680},
    {"epoch": 0.6307550644567219, "grad_norm": 0.15779771059270742, "learning_rate": 7.223369454718918e-05, "loss": 0.7186, "step": 685},
    {"epoch": 0.6353591160220995, "grad_norm": 0.11794313696285232, "learning_rate": 7.069279466050035e-05, "loss": 0.6843, "step": 690},
    {"epoch": 0.639963167587477, "grad_norm": 0.13595292601440173, "learning_rate": 6.915947035014443e-05, "loss": 0.7168, "step": 695},
    {"epoch": 0.6445672191528545, "grad_norm": 0.1344328444601024, "learning_rate": 6.763411796286357e-05, "loss": 0.6965, "step": 700},
    {"epoch": 0.649171270718232, "grad_norm": 0.1231670701178996, "learning_rate": 6.611713178474916e-05, "loss": 0.7244, "step": 705},
    {"epoch": 0.6537753222836096, "grad_norm": 0.1437978280644596, "learning_rate": 6.460890393932362e-05, "loss": 0.6734, "step": 710},
    {"epoch": 0.6583793738489871, "grad_norm": 0.13712514064241163, "learning_rate": 6.310982428618077e-05, "loss": 0.7112, "step": 715},
    {"epoch": 0.6629834254143646, "grad_norm": 0.12729820444217552, "learning_rate": 6.162028032021167e-05, "loss": 0.677, "step": 720},
    {"epoch": 0.6675874769797422, "grad_norm": 0.12007586806931402, "learning_rate": 6.014065707144176e-05, "loss": 0.6761, "step": 725},
    {"epoch": 0.6721915285451197, "grad_norm": 0.11950710825018164, "learning_rate": 5.867133700550479e-05, "loss": 0.7132, "step": 730},
    {"epoch": 0.6767955801104972, "grad_norm": 0.12219449475526536, "learning_rate": 5.721269992478002e-05, "loss": 0.6891, "step": 735},
    {"epoch": 0.6813996316758748, "grad_norm": 0.13820553666745064, "learning_rate": 5.576512287021765e-05, "loss": 0.7032, "step": 740},
    {"epoch": 0.6860036832412523, "grad_norm": 0.12575435617713188, "learning_rate": 5.432898002387783e-05, "loss": 0.685, "step": 745},
    {"epoch": 0.6906077348066298, "grad_norm": 0.12305743189245932, "learning_rate": 5.290464261220927e-05, "loss": 0.6874, "step": 750},
    {"epoch": 0.6952117863720073, "grad_norm": 0.1602899973447253, "learning_rate": 5.149247881009118e-05, "loss": 0.7074, "step": 755},
    {"epoch": 0.6998158379373849, "grad_norm": 0.11125288181618329, "learning_rate": 5.009285364566435e-05, "loss": 0.6646, "step": 760},
    {"epoch": 0.7044198895027625, "grad_norm": 0.12864820726453974, "learning_rate": 4.870612890597565e-05, "loss": 0.6976, "step": 765},
    {"epoch": 0.7090239410681399, "grad_norm": 0.12633554254462218, "learning_rate": 4.733266304346005e-05, "loss": 0.6886, "step": 770},
    {"epoch": 0.7136279926335175, "grad_norm": 0.13645165523957492, "learning_rate": 4.5972811083285014e-05, "loss": 0.7263, "step": 775},
    {"epoch": 0.7182320441988951, "grad_norm": 0.14653256629663075, "learning_rate": 4.462692453158039e-05, "loss": 0.7078, "step": 780},
    {"epoch": 0.7228360957642725, "grad_norm": 0.13327438185784568, "learning_rate": 4.329535128457822e-05, "loss": 0.7057, "step": 785},
    {"epoch": 0.7274401473296501, "grad_norm": 0.1383815308770519, "learning_rate": 4.197843553868538e-05, "loss": 0.6894, "step": 790},
    {"epoch": 0.7320441988950276, "grad_norm": 0.12797224319884995, "learning_rate": 4.0676517701513015e-05, "loss": 0.685, "step": 795},
    {"epoch": 0.7366482504604052, "grad_norm": 0.13203365820667615, "learning_rate": 3.93899343038848e-05, "loss": 0.6629, "step": 800},
    {"epoch": 0.7366482504604052, "eval_loss": 0.6758299469947815, "eval_runtime": 47.789, "eval_samples_per_second": 10.463, "eval_steps_per_second": 0.67, "step": 800},
    {"epoch": 0.7412523020257827, "grad_norm": 0.14945609861259754, "learning_rate": 3.8119017912847873e-05, "loss": 0.7157, "step": 805},
    {"epoch": 0.7458563535911602, "grad_norm": 0.14064204226125424, "learning_rate": 3.686409704570778e-05, "loss": 0.7077, "step": 810},
    {"epoch": 0.7504604051565378, "grad_norm": 0.15174343823382735, "learning_rate": 3.5625496085110754e-05, "loss": 0.714, "step": 815},
    {"epoch": 0.7550644567219152, "grad_norm": 0.13218371839998455, "learning_rate": 3.440353519519439e-05, "loss": 0.7095, "step": 820},
    {"epoch": 0.7596685082872928, "grad_norm": 0.1323028538168689, "learning_rate": 3.3198530238828976e-05, "loss": 0.6971, "step": 825},
    {"epoch": 0.7642725598526704, "grad_norm": 0.14745827705981765, "learning_rate": 3.20107926959704e-05, "loss": 0.7041, "step": 830},
    {"epoch": 0.7688766114180479, "grad_norm": 0.1442754442357172, "learning_rate": 3.0840629583146416e-05, "loss": 0.6814, "step": 835},
    {"epoch": 0.7734806629834254, "grad_norm": 0.13106050495634988, "learning_rate": 2.9688343374096115e-05, "loss": 0.6823, "step": 840},
    {"epoch": 0.7780847145488029, "grad_norm": 0.1644775188382007, "learning_rate": 2.8554231921584162e-05, "loss": 0.7221, "step": 845},
    {"epoch": 0.7826887661141805, "grad_norm": 0.13022155447713443, "learning_rate": 2.743858838040918e-05, "loss": 0.7135, "step": 850},
    {"epoch": 0.787292817679558, "grad_norm": 0.13264345669070807, "learning_rate": 2.6341701131626486e-05, "loss": 0.6979, "step": 855},
    {"epoch": 0.7918968692449355, "grad_norm": 0.14323695673993403, "learning_rate": 2.526385370800515e-05, "loss": 0.6969, "step": 860},
    {"epoch": 0.7965009208103131, "grad_norm": 0.1542994659104081, "learning_rate": 2.4205324720737786e-05, "loss": 0.7042, "step": 865},
    {"epoch": 0.8011049723756906, "grad_norm": 0.1449879659078434, "learning_rate": 2.316638778742295e-05, "loss": 0.6899, "step": 870},
    {"epoch": 0.8057090239410681, "grad_norm": 0.13076802781423985, "learning_rate": 2.214731146133793e-05, "loss": 0.7111, "step": 875},
    {"epoch": 0.8103130755064457, "grad_norm": 0.13291253398347233, "learning_rate": 2.1148359162020937e-05, "loss": 0.6914, "step": 880},
    {"epoch": 0.8149171270718232, "grad_norm": 0.13550181071065082, "learning_rate": 2.0169789107180193e-05, "loss": 0.6866, "step": 885},
    {"epoch": 0.8195211786372008, "grad_norm": 0.1210302923733577, "learning_rate": 1.921185424594758e-05, "loss": 0.6879, "step": 890},
    {"epoch": 0.8241252302025782, "grad_norm": 0.14037576745574026, "learning_rate": 1.827480219349409e-05, "loss": 0.7038, "step": 895},
    {"epoch": 0.8287292817679558, "grad_norm": 0.14791019025478577, "learning_rate": 1.7358875167024335e-05, "loss": 0.6604, "step": 900},
    {"epoch": 0.8333333333333334, "grad_norm": 0.11954867056661006, "learning_rate": 1.6464309923166033e-05, "loss": 0.7045, "step": 905},
    {"epoch": 0.8379373848987108, "grad_norm": 0.13365725863804348, "learning_rate": 1.5591337696771246e-05, "loss": 0.7313, "step": 910},
    {"epoch": 0.8425414364640884, "grad_norm": 0.13259625682339735, "learning_rate": 1.4740184141144664e-05, "loss": 0.7099, "step": 915},
    {"epoch": 0.8471454880294659, "grad_norm": 0.14145923503935914, "learning_rate": 1.391106926971496e-05, "loss": 0.6952, "step": 920},
    {"epoch": 0.8517495395948435, "grad_norm": 0.14338379016355754, "learning_rate": 1.3104207399163815e-05, "loss": 0.6856, "step": 925},
    {"epoch": 0.856353591160221, "grad_norm": 0.14829983664793228, "learning_rate": 1.2319807094027492e-05, "loss": 0.7156, "step": 930},
    {"epoch": 0.8609576427255985, "grad_norm": 0.12075075754668071, "learning_rate": 1.1558071112785296e-05, "loss": 0.6929, "step": 935},
    {"epoch": 0.8655616942909761, "grad_norm": 0.12950812269617865, "learning_rate": 1.0819196355448801e-05, "loss": 0.7045, "step": 940},
    {"epoch": 0.8701657458563536, "grad_norm": 0.1348184333941541, "learning_rate": 1.0103373812665551e-05, "loss": 0.6891, "step": 945},
    {"epoch": 0.8747697974217311, "grad_norm": 0.12025879421103704, "learning_rate": 9.410788516350077e-06, "loss": 0.6829, "step": 950},
    {"epoch": 0.8793738489871087, "grad_norm": 0.1332253794557465, "learning_rate": 8.741619491855291e-06, "loss": 0.7057, "step": 955},
    {"epoch": 0.8839779005524862, "grad_norm": 0.1219486531845245, "learning_rate": 8.096039711696545e-06, "loss": 0.701, "step": 960},
    {"epoch": 0.8885819521178637, "grad_norm": 0.13660210114125856, "learning_rate": 7.474216050840122e-06, "loss": 0.7016, "step": 965},
    {"epoch": 0.8931860036832413, "grad_norm": 0.11517510371236951, "learning_rate": 6.876309243568058e-06, "loss": 0.6961, "step": 970},
    {"epoch": 0.8977900552486188, "grad_norm": 0.14163517493869995, "learning_rate": 6.302473841930134e-06, "loss": 0.7082, "step": 975},
    {"epoch": 0.9023941068139963, "grad_norm": 0.14904472118298337, "learning_rate": 5.752858175793951e-06, "loss": 0.7017, "step": 980},
    {"epoch": 0.9069981583793738, "grad_norm": 0.13222086336141328, "learning_rate": 5.2276043145033425e-06, "loss": 0.6951, "step": 985},
    {"epoch": 0.9116022099447514, "grad_norm": 0.12747716652913677, "learning_rate": 4.726848030155129e-06, "loss": 0.727, "step": 990},
    {"epoch": 0.916206261510129, "grad_norm": 0.142369192844187, "learning_rate": 4.2507187625035135e-06, "loss": 0.7112, "step": 995},
    {"epoch": 0.9208103130755064, "grad_norm": 0.12916678602655413, "learning_rate": 3.7993395855014936e-06, "loss": 0.6769, "step": 1000},
    {"epoch": 0.9208103130755064, "eval_loss": 0.6735966205596924, "eval_runtime": 47.6678, "eval_samples_per_second": 10.489, "eval_steps_per_second": 0.671, "step": 1000},
    {"epoch": 0.925414364640884, "grad_norm": 0.1315541461105031, "learning_rate": 3.372827175487536e-06, "loss": 0.7239, "step": 1005},
    {"epoch": 0.9300184162062615, "grad_norm": 0.12551668481359965, "learning_rate": 2.9712917810261644e-06, "loss": 0.707, "step": 1010},
    {"epoch": 0.934622467771639, "grad_norm": 0.13010659142747308, "learning_rate": 2.5948371944099713e-06, "loss": 0.7066, "step": 1015},
    {"epoch": 0.9392265193370166, "grad_norm": 0.1374733909690572, "learning_rate": 2.2435607248304623e-06, "loss": 0.6981, "step": 1020},
    {"epoch": 0.9438305709023941, "grad_norm": 0.1557801553309302, "learning_rate": 1.91755317322474e-06, "loss": 0.6994, "step": 1025},
    {"epoch": 0.9484346224677717, "grad_norm": 0.1264138943615379, "learning_rate": 1.6168988088045678e-06, "loss": 0.704, "step": 1030},
    {"epoch": 0.9530386740331491, "grad_norm": 0.10632950484964579, "learning_rate": 1.341675347273652e-06, "loss": 0.677, "step": 1035},
    {"epoch": 0.9576427255985267, "grad_norm": 0.1230994587161385, "learning_rate": 1.0919539307391314e-06, "loss": 0.6891, "step": 1040},
    {"epoch": 0.9622467771639043, "grad_norm": 0.1356286737876717, "learning_rate": 8.67799109322076e-07, "loss": 0.6993, "step": 1045},
    {"epoch": 0.9668508287292817, "grad_norm": 0.1544804686351126, "learning_rate": 6.692688244720358e-07, "loss": 0.6888, "step": 1050},
    {"epoch": 0.9714548802946593, "grad_norm": 0.1409581730070821, "learning_rate": 4.964143939898746e-07, "loss": 0.7092, "step": 1055},
    {"epoch": 0.9760589318600368, "grad_norm": 0.13126265245760058, "learning_rate": 3.49280498762683e-07, "loss": 0.7008, "step": 1060},
    {"epoch": 0.9806629834254144, "grad_norm": 0.13981476492383843, "learning_rate": 2.2790517121428212e-07, "loss": 0.682, "step": 1065},
    {"epoch": 0.9852670349907919, "grad_norm": 0.11466302388077335, "learning_rate": 1.323197854742775e-07, "loss": 0.7008, "step": 1070},
    {"epoch": 0.9898710865561694, "grad_norm": 0.14514253246885966, "learning_rate": 6.254904926820126e-08, "loss": 0.7039, "step": 1075},
    {"epoch": 0.994475138121547, "grad_norm": 0.12201481168818025, "learning_rate": 1.8610997530876807e-08, "loss": 0.696, "step": 1080},
    {"epoch": 0.9990791896869244, "grad_norm": 0.13117233881672566, "learning_rate": 5.169877445809767e-10, "loss": 0.6881, "step": 1085},
    {"epoch": 1.0, "step": 1086, "total_flos": 4801486812086272.0, "train_loss": 0.7253576234998405, "train_runtime": 22925.9932, "train_samples_per_second": 3.031, "train_steps_per_second": 0.047}
  ],
  "logging_steps": 5,
  "max_steps": 1086,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 4801486812086272.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}