Training in progress, step 200, checkpoint (commit 98c7745, verified)
{
"best_metric": 0.2834896147251129,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.05130178273695011,
"eval_steps": 25,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00025650891368475054,
"grad_norm": 0.2452760636806488,
"learning_rate": 2.9999999999999997e-05,
"loss": 1.4425,
"step": 1
},
{
"epoch": 0.00025650891368475054,
"eval_loss": 0.39530715346336365,
"eval_runtime": 31.6895,
"eval_samples_per_second": 1.578,
"eval_steps_per_second": 0.221,
"step": 1
},
{
"epoch": 0.0005130178273695011,
"grad_norm": 0.17752642929553986,
"learning_rate": 5.9999999999999995e-05,
"loss": 1.299,
"step": 2
},
{
"epoch": 0.0007695267410542517,
"grad_norm": 0.24689655005931854,
"learning_rate": 8.999999999999999e-05,
"loss": 1.5197,
"step": 3
},
{
"epoch": 0.0010260356547390022,
"grad_norm": 0.19504807889461517,
"learning_rate": 0.00011999999999999999,
"loss": 1.3453,
"step": 4
},
{
"epoch": 0.0012825445684237528,
"grad_norm": 0.1986263245344162,
"learning_rate": 0.00015,
"loss": 1.3429,
"step": 5
},
{
"epoch": 0.0015390534821085034,
"grad_norm": 0.310422420501709,
"learning_rate": 0.00017999999999999998,
"loss": 1.3507,
"step": 6
},
{
"epoch": 0.0017955623957932538,
"grad_norm": 0.2197422981262207,
"learning_rate": 0.00020999999999999998,
"loss": 1.3089,
"step": 7
},
{
"epoch": 0.0020520713094780044,
"grad_norm": 0.22613106667995453,
"learning_rate": 0.00023999999999999998,
"loss": 1.161,
"step": 8
},
{
"epoch": 0.002308580223162755,
"grad_norm": 0.2758508026599884,
"learning_rate": 0.00027,
"loss": 1.3218,
"step": 9
},
{
"epoch": 0.0025650891368475055,
"grad_norm": 0.24372389912605286,
"learning_rate": 0.0003,
"loss": 1.2277,
"step": 10
},
{
"epoch": 0.002821598050532256,
"grad_norm": 0.24940873682498932,
"learning_rate": 0.0002999794957488703,
"loss": 1.1953,
"step": 11
},
{
"epoch": 0.0030781069642170067,
"grad_norm": 0.23299287259578705,
"learning_rate": 0.0002999179886011389,
"loss": 1.1864,
"step": 12
},
{
"epoch": 0.003334615877901757,
"grad_norm": 0.20366379618644714,
"learning_rate": 0.0002998154953722457,
"loss": 1.2584,
"step": 13
},
{
"epoch": 0.0035911247915865075,
"grad_norm": 0.2235558032989502,
"learning_rate": 0.00029967204408281613,
"loss": 1.1625,
"step": 14
},
{
"epoch": 0.003847633705271258,
"grad_norm": 0.198708176612854,
"learning_rate": 0.00029948767395100045,
"loss": 1.1748,
"step": 15
},
{
"epoch": 0.004104142618956009,
"grad_norm": 0.1967567503452301,
"learning_rate": 0.0002992624353817517,
"loss": 1.1347,
"step": 16
},
{
"epoch": 0.004360651532640759,
"grad_norm": 0.15366947650909424,
"learning_rate": 0.0002989963899530457,
"loss": 1.0114,
"step": 17
},
{
"epoch": 0.00461716044632551,
"grad_norm": 0.18661633133888245,
"learning_rate": 0.00029868961039904624,
"loss": 1.2625,
"step": 18
},
{
"epoch": 0.00487366936001026,
"grad_norm": 3.7823309898376465,
"learning_rate": 0.00029834218059022024,
"loss": 1.2615,
"step": 19
},
{
"epoch": 0.005130178273695011,
"grad_norm": 0.20137062668800354,
"learning_rate": 0.00029795419551040833,
"loss": 1.27,
"step": 20
},
{
"epoch": 0.005386687187379761,
"grad_norm": 0.18843530118465424,
"learning_rate": 0.00029752576123085736,
"loss": 1.0947,
"step": 21
},
{
"epoch": 0.005643196101064512,
"grad_norm": 0.17115682363510132,
"learning_rate": 0.0002970569948812214,
"loss": 1.1604,
"step": 22
},
{
"epoch": 0.0058997050147492625,
"grad_norm": 0.17423115670681,
"learning_rate": 0.0002965480246175399,
"loss": 1.2279,
"step": 23
},
{
"epoch": 0.0061562139284340135,
"grad_norm": 0.20533309876918793,
"learning_rate": 0.0002959989895872009,
"loss": 1.3852,
"step": 24
},
{
"epoch": 0.006412722842118764,
"grad_norm": 0.19516155123710632,
"learning_rate": 0.0002954100398908995,
"loss": 1.3035,
"step": 25
},
{
"epoch": 0.006412722842118764,
"eval_loss": 0.3231082558631897,
"eval_runtime": 31.8559,
"eval_samples_per_second": 1.57,
"eval_steps_per_second": 0.22,
"step": 25
},
{
"epoch": 0.006669231755803514,
"grad_norm": 0.3001914322376251,
"learning_rate": 0.0002947813365416023,
"loss": 1.3519,
"step": 26
},
{
"epoch": 0.006925740669488265,
"grad_norm": 0.2448321133852005,
"learning_rate": 0.0002941130514205272,
"loss": 1.3134,
"step": 27
},
{
"epoch": 0.007182249583173015,
"grad_norm": 0.21013857424259186,
"learning_rate": 0.0002934053672301536,
"loss": 1.1908,
"step": 28
},
{
"epoch": 0.007438758496857766,
"grad_norm": 0.2920572757720947,
"learning_rate": 0.00029265847744427303,
"loss": 1.2764,
"step": 29
},
{
"epoch": 0.007695267410542516,
"grad_norm": 0.23325499892234802,
"learning_rate": 0.00029187258625509513,
"loss": 1.2081,
"step": 30
},
{
"epoch": 0.007951776324227267,
"grad_norm": 0.24203185737133026,
"learning_rate": 0.00029104790851742417,
"loss": 1.2996,
"step": 31
},
{
"epoch": 0.008208285237912017,
"grad_norm": 0.20244275033473969,
"learning_rate": 0.0002901846696899191,
"loss": 1.2741,
"step": 32
},
{
"epoch": 0.008464794151596768,
"grad_norm": 0.24007591605186462,
"learning_rate": 0.00028928310577345606,
"loss": 1.2926,
"step": 33
},
{
"epoch": 0.008721303065281518,
"grad_norm": 2.9329335689544678,
"learning_rate": 0.0002883434632466077,
"loss": 1.3904,
"step": 34
},
{
"epoch": 0.00897781197896627,
"grad_norm": 0.2622990906238556,
"learning_rate": 0.00028736599899825856,
"loss": 1.3224,
"step": 35
},
{
"epoch": 0.00923432089265102,
"grad_norm": 0.3389492630958557,
"learning_rate": 0.00028635098025737434,
"loss": 1.3022,
"step": 36
},
{
"epoch": 0.00949082980633577,
"grad_norm": 0.3291040062904358,
"learning_rate": 0.00028529868451994384,
"loss": 1.3772,
"step": 37
},
{
"epoch": 0.00974733872002052,
"grad_norm": 0.29008948802948,
"learning_rate": 0.0002842093994731145,
"loss": 1.3874,
"step": 38
},
{
"epoch": 0.010003847633705272,
"grad_norm": 0.2444349229335785,
"learning_rate": 0.00028308342291654174,
"loss": 1.3194,
"step": 39
},
{
"epoch": 0.010260356547390022,
"grad_norm": 0.2349006086587906,
"learning_rate": 0.00028192106268097334,
"loss": 1.2226,
"step": 40
},
{
"epoch": 0.010516865461074772,
"grad_norm": 0.3519050180912018,
"learning_rate": 0.00028072263654409154,
"loss": 1.2642,
"step": 41
},
{
"epoch": 0.010773374374759523,
"grad_norm": 3.122650384902954,
"learning_rate": 0.0002794884721436361,
"loss": 1.3993,
"step": 42
},
{
"epoch": 0.011029883288444273,
"grad_norm": 0.4444164037704468,
"learning_rate": 0.00027821890688783083,
"loss": 1.2325,
"step": 43
},
{
"epoch": 0.011286392202129025,
"grad_norm": 0.3635028302669525,
"learning_rate": 0.0002769142878631403,
"loss": 1.2689,
"step": 44
},
{
"epoch": 0.011542901115813775,
"grad_norm": 0.5229223370552063,
"learning_rate": 0.00027557497173937923,
"loss": 1.3902,
"step": 45
},
{
"epoch": 0.011799410029498525,
"grad_norm": 0.6191115379333496,
"learning_rate": 0.000274201324672203,
"loss": 1.4242,
"step": 46
},
{
"epoch": 0.012055918943183275,
"grad_norm": 1.497909665107727,
"learning_rate": 0.00027279372220300385,
"loss": 1.7142,
"step": 47
},
{
"epoch": 0.012312427856868027,
"grad_norm": 1.673071026802063,
"learning_rate": 0.0002713525491562421,
"loss": 1.6996,
"step": 48
},
{
"epoch": 0.012568936770552777,
"grad_norm": 7.249523639678955,
"learning_rate": 0.00026987819953423867,
"loss": 1.9288,
"step": 49
},
{
"epoch": 0.012825445684237527,
"grad_norm": 6.95615816116333,
"learning_rate": 0.00026837107640945905,
"loss": 2.1629,
"step": 50
},
{
"epoch": 0.012825445684237527,
"eval_loss": 0.33410218358039856,
"eval_runtime": 31.8447,
"eval_samples_per_second": 1.57,
"eval_steps_per_second": 0.22,
"step": 50
},
{
"epoch": 0.013081954597922277,
"grad_norm": 0.581642210483551,
"learning_rate": 0.0002668315918143169,
"loss": 1.2623,
"step": 51
},
{
"epoch": 0.013338463511607028,
"grad_norm": 0.468250036239624,
"learning_rate": 0.00026526016662852886,
"loss": 1.2536,
"step": 52
},
{
"epoch": 0.01359497242529178,
"grad_norm": 0.3212797939777374,
"learning_rate": 0.00026365723046405023,
"loss": 1.1647,
"step": 53
},
{
"epoch": 0.01385148133897653,
"grad_norm": 0.24367539584636688,
"learning_rate": 0.0002620232215476231,
"loss": 1.2143,
"step": 54
},
{
"epoch": 0.01410799025266128,
"grad_norm": 0.1994316130876541,
"learning_rate": 0.0002603585866009697,
"loss": 1.1233,
"step": 55
},
{
"epoch": 0.01436449916634603,
"grad_norm": 0.1888861209154129,
"learning_rate": 0.00025866378071866334,
"loss": 1.0761,
"step": 56
},
{
"epoch": 0.014621008080030782,
"grad_norm": 0.19591979682445526,
"learning_rate": 0.00025693926724370956,
"loss": 1.1953,
"step": 57
},
{
"epoch": 0.014877516993715532,
"grad_norm": 0.20236290991306305,
"learning_rate": 0.00025518551764087326,
"loss": 1.0408,
"step": 58
},
{
"epoch": 0.015134025907400282,
"grad_norm": 0.21848787367343903,
"learning_rate": 0.00025340301136778483,
"loss": 1.1106,
"step": 59
},
{
"epoch": 0.015390534821085032,
"grad_norm": 0.1914772242307663,
"learning_rate": 0.00025159223574386114,
"loss": 1.0984,
"step": 60
},
{
"epoch": 0.015647043734769783,
"grad_norm": 0.18511377274990082,
"learning_rate": 0.0002497536858170772,
"loss": 1.0421,
"step": 61
},
{
"epoch": 0.015903552648454534,
"grad_norm": 0.20475885272026062,
"learning_rate": 0.00024788786422862526,
"loss": 1.1696,
"step": 62
},
{
"epoch": 0.016160061562139283,
"grad_norm": 0.17918260395526886,
"learning_rate": 0.00024599528107549745,
"loss": 1.1129,
"step": 63
},
{
"epoch": 0.016416570475824035,
"grad_norm": 0.18487805128097534,
"learning_rate": 0.00024407645377103054,
"loss": 1.048,
"step": 64
},
{
"epoch": 0.016673079389508787,
"grad_norm": 0.15813754498958588,
"learning_rate": 0.00024213190690345018,
"loss": 1.1329,
"step": 65
},
{
"epoch": 0.016929588303193535,
"grad_norm": 0.17792178690433502,
"learning_rate": 0.00024016217209245374,
"loss": 1.0894,
"step": 66
},
{
"epoch": 0.017186097216878287,
"grad_norm": 0.19200259447097778,
"learning_rate": 0.00023816778784387094,
"loss": 1.0754,
"step": 67
},
{
"epoch": 0.017442606130563035,
"grad_norm": 0.17065322399139404,
"learning_rate": 0.0002361492994024415,
"loss": 1.0666,
"step": 68
},
{
"epoch": 0.017699115044247787,
"grad_norm": 0.18813008069992065,
"learning_rate": 0.0002341072586027509,
"loss": 1.2129,
"step": 69
},
{
"epoch": 0.01795562395793254,
"grad_norm": 0.20512309670448303,
"learning_rate": 0.00023204222371836405,
"loss": 1.0896,
"step": 70
},
{
"epoch": 0.018212132871617288,
"grad_norm": 0.16995452344417572,
"learning_rate": 0.00022995475930919905,
"loss": 1.0235,
"step": 71
},
{
"epoch": 0.01846864178530204,
"grad_norm": 0.1726696640253067,
"learning_rate": 0.00022784543606718227,
"loss": 1.171,
"step": 72
},
{
"epoch": 0.01872515069898679,
"grad_norm": 0.20911255478858948,
"learning_rate": 0.00022571483066022657,
"loss": 1.3302,
"step": 73
},
{
"epoch": 0.01898165961267154,
"grad_norm": 0.1987486630678177,
"learning_rate": 0.0002235635255745762,
"loss": 1.2373,
"step": 74
},
{
"epoch": 0.019238168526356292,
"grad_norm": 0.19390402734279633,
"learning_rate": 0.00022139210895556104,
"loss": 1.2301,
"step": 75
},
{
"epoch": 0.019238168526356292,
"eval_loss": 0.294239342212677,
"eval_runtime": 31.8459,
"eval_samples_per_second": 1.57,
"eval_steps_per_second": 0.22,
"step": 75
},
{
"epoch": 0.01949467744004104,
"grad_norm": 0.3244732618331909,
"learning_rate": 0.00021920117444680317,
"loss": 1.2272,
"step": 76
},
{
"epoch": 0.019751186353725792,
"grad_norm": 0.2098388969898224,
"learning_rate": 0.00021699132102792097,
"loss": 1.1795,
"step": 77
},
{
"epoch": 0.020007695267410544,
"grad_norm": 0.20879194140434265,
"learning_rate": 0.0002147631528507739,
"loss": 1.1792,
"step": 78
},
{
"epoch": 0.020264204181095292,
"grad_norm": 0.19269469380378723,
"learning_rate": 0.00021251727907429355,
"loss": 1.1004,
"step": 79
},
{
"epoch": 0.020520713094780044,
"grad_norm": 0.21726348996162415,
"learning_rate": 0.0002102543136979454,
"loss": 1.2513,
"step": 80
},
{
"epoch": 0.020777222008464793,
"grad_norm": 0.2271123230457306,
"learning_rate": 0.0002079748753938678,
"loss": 1.3485,
"step": 81
},
{
"epoch": 0.021033730922149545,
"grad_norm": 0.2089950293302536,
"learning_rate": 0.0002056795873377331,
"loss": 1.324,
"step": 82
},
{
"epoch": 0.021290239835834297,
"grad_norm": 0.19828511774539948,
"learning_rate": 0.00020336907703837748,
"loss": 1.2114,
"step": 83
},
{
"epoch": 0.021546748749519045,
"grad_norm": 0.3832542300224304,
"learning_rate": 0.00020104397616624645,
"loss": 1.3824,
"step": 84
},
{
"epoch": 0.021803257663203797,
"grad_norm": 0.19173561036586761,
"learning_rate": 0.00019870492038070252,
"loss": 1.0925,
"step": 85
},
{
"epoch": 0.022059766576888545,
"grad_norm": 0.23011600971221924,
"learning_rate": 0.0001963525491562421,
"loss": 1.1248,
"step": 86
},
{
"epoch": 0.022316275490573297,
"grad_norm": 0.21012580394744873,
"learning_rate": 0.0001939875056076697,
"loss": 1.2874,
"step": 87
},
{
"epoch": 0.02257278440425805,
"grad_norm": 0.3018735647201538,
"learning_rate": 0.00019161043631427666,
"loss": 1.2895,
"step": 88
},
{
"epoch": 0.022829293317942798,
"grad_norm": 0.2396928071975708,
"learning_rate": 0.00018922199114307294,
"loss": 1.2793,
"step": 89
},
{
"epoch": 0.02308580223162755,
"grad_norm": 0.25280284881591797,
"learning_rate": 0.00018682282307111987,
"loss": 1.3991,
"step": 90
},
{
"epoch": 0.023342311145312298,
"grad_norm": 0.27613183856010437,
"learning_rate": 0.00018441358800701273,
"loss": 1.2545,
"step": 91
},
{
"epoch": 0.02359882005899705,
"grad_norm": 0.27307531237602234,
"learning_rate": 0.00018199494461156203,
"loss": 1.1296,
"step": 92
},
{
"epoch": 0.0238553289726818,
"grad_norm": 0.28749963641166687,
"learning_rate": 0.000179567554117722,
"loss": 1.1461,
"step": 93
},
{
"epoch": 0.02411183788636655,
"grad_norm": 0.3967796266078949,
"learning_rate": 0.00017713208014981648,
"loss": 1.3377,
"step": 94
},
{
"epoch": 0.024368346800051302,
"grad_norm": 0.3728596866130829,
"learning_rate": 0.00017468918854211007,
"loss": 1.2604,
"step": 95
},
{
"epoch": 0.024624855713736054,
"grad_norm": 0.45030513405799866,
"learning_rate": 0.00017223954715677627,
"loss": 1.0331,
"step": 96
},
{
"epoch": 0.024881364627420802,
"grad_norm": 0.8793625235557556,
"learning_rate": 0.00016978382570131034,
"loss": 1.2188,
"step": 97
},
{
"epoch": 0.025137873541105554,
"grad_norm": 0.9390032291412354,
"learning_rate": 0.00016732269554543794,
"loss": 1.3501,
"step": 98
},
{
"epoch": 0.025394382454790303,
"grad_norm": 0.9944431185722351,
"learning_rate": 0.00016485682953756942,
"loss": 1.5824,
"step": 99
},
{
"epoch": 0.025650891368475055,
"grad_norm": 1.055663824081421,
"learning_rate": 0.00016238690182084986,
"loss": 1.3991,
"step": 100
},
{
"epoch": 0.025650891368475055,
"eval_loss": 0.3280201852321625,
"eval_runtime": 31.818,
"eval_samples_per_second": 1.571,
"eval_steps_per_second": 0.22,
"step": 100
},
{
"epoch": 0.025907400282159807,
"grad_norm": 0.8113174438476562,
"learning_rate": 0.0001599135876488549,
"loss": 1.2319,
"step": 101
},
{
"epoch": 0.026163909195844555,
"grad_norm": 0.6201874613761902,
"learning_rate": 0.00015743756320098332,
"loss": 1.2461,
"step": 102
},
{
"epoch": 0.026420418109529307,
"grad_norm": 0.49239474534988403,
"learning_rate": 0.0001549595053975962,
"loss": 1.2268,
"step": 103
},
{
"epoch": 0.026676927023214055,
"grad_norm": 0.26686975359916687,
"learning_rate": 0.00015248009171495378,
"loss": 1.1162,
"step": 104
},
{
"epoch": 0.026933435936898807,
"grad_norm": 0.20602157711982727,
"learning_rate": 0.00015,
"loss": 1.0782,
"step": 105
},
{
"epoch": 0.02718994485058356,
"grad_norm": 0.2141565978527069,
"learning_rate": 0.00014751990828504622,
"loss": 1.1148,
"step": 106
},
{
"epoch": 0.027446453764268307,
"grad_norm": 0.22891749441623688,
"learning_rate": 0.00014504049460240375,
"loss": 1.0624,
"step": 107
},
{
"epoch": 0.02770296267795306,
"grad_norm": 0.23283091187477112,
"learning_rate": 0.00014256243679901663,
"loss": 1.2018,
"step": 108
},
{
"epoch": 0.027959471591637808,
"grad_norm": 0.23905718326568604,
"learning_rate": 0.00014008641235114508,
"loss": 1.2196,
"step": 109
},
{
"epoch": 0.02821598050532256,
"grad_norm": 0.23581498861312866,
"learning_rate": 0.00013761309817915014,
"loss": 1.1004,
"step": 110
},
{
"epoch": 0.02847248941900731,
"grad_norm": 0.2037910372018814,
"learning_rate": 0.00013514317046243058,
"loss": 1.1096,
"step": 111
},
{
"epoch": 0.02872899833269206,
"grad_norm": 0.21541164815425873,
"learning_rate": 0.00013267730445456208,
"loss": 1.0608,
"step": 112
},
{
"epoch": 0.028985507246376812,
"grad_norm": 0.18735271692276,
"learning_rate": 0.00013021617429868963,
"loss": 1.1208,
"step": 113
},
{
"epoch": 0.029242016160061564,
"grad_norm": 0.18497084081172943,
"learning_rate": 0.00012776045284322368,
"loss": 1.026,
"step": 114
},
{
"epoch": 0.029498525073746312,
"grad_norm": 0.16876810789108276,
"learning_rate": 0.00012531081145788987,
"loss": 0.9706,
"step": 115
},
{
"epoch": 0.029755033987431064,
"grad_norm": 0.16586469113826752,
"learning_rate": 0.00012286791985018355,
"loss": 1.0182,
"step": 116
},
{
"epoch": 0.030011542901115813,
"grad_norm": 0.1694636046886444,
"learning_rate": 0.00012043244588227796,
"loss": 1.0287,
"step": 117
},
{
"epoch": 0.030268051814800565,
"grad_norm": 0.15620790421962738,
"learning_rate": 0.00011800505538843798,
"loss": 1.0433,
"step": 118
},
{
"epoch": 0.030524560728485316,
"grad_norm": 0.1938304752111435,
"learning_rate": 0.00011558641199298727,
"loss": 1.1902,
"step": 119
},
{
"epoch": 0.030781069642170065,
"grad_norm": 0.16703596711158752,
"learning_rate": 0.00011317717692888012,
"loss": 1.048,
"step": 120
},
{
"epoch": 0.031037578555854817,
"grad_norm": 0.1851804554462433,
"learning_rate": 0.00011077800885692702,
"loss": 1.0233,
"step": 121
},
{
"epoch": 0.031294087469539565,
"grad_norm": 0.2184515744447708,
"learning_rate": 0.00010838956368572334,
"loss": 1.0393,
"step": 122
},
{
"epoch": 0.031550596383224314,
"grad_norm": 0.18380595743656158,
"learning_rate": 0.0001060124943923303,
"loss": 1.2678,
"step": 123
},
{
"epoch": 0.03180710529690907,
"grad_norm": 0.1822306215763092,
"learning_rate": 0.0001036474508437579,
"loss": 1.3023,
"step": 124
},
{
"epoch": 0.03206361421059382,
"grad_norm": 0.17711974680423737,
"learning_rate": 0.00010129507961929748,
"loss": 1.1011,
"step": 125
},
{
"epoch": 0.03206361421059382,
"eval_loss": 0.2889536917209625,
"eval_runtime": 31.8454,
"eval_samples_per_second": 1.57,
"eval_steps_per_second": 0.22,
"step": 125
},
{
"epoch": 0.032320123124278566,
"grad_norm": 0.17134203016757965,
"learning_rate": 9.895602383375353e-05,
"loss": 1.1858,
"step": 126
},
{
"epoch": 0.03257663203796332,
"grad_norm": 0.1913871020078659,
"learning_rate": 9.663092296162251e-05,
"loss": 1.2919,
"step": 127
},
{
"epoch": 0.03283314095164807,
"grad_norm": 0.16769054532051086,
"learning_rate": 9.432041266226686e-05,
"loss": 1.1056,
"step": 128
},
{
"epoch": 0.03308964986533282,
"grad_norm": 0.2665941119194031,
"learning_rate": 9.202512460613219e-05,
"loss": 1.2915,
"step": 129
},
{
"epoch": 0.03334615877901757,
"grad_norm": 0.19861632585525513,
"learning_rate": 8.97456863020546e-05,
"loss": 1.3115,
"step": 130
},
{
"epoch": 0.03360266769270232,
"grad_norm": 0.2502779960632324,
"learning_rate": 8.748272092570646e-05,
"loss": 1.1837,
"step": 131
},
{
"epoch": 0.03385917660638707,
"grad_norm": 0.21725796163082123,
"learning_rate": 8.523684714922608e-05,
"loss": 1.2289,
"step": 132
},
{
"epoch": 0.034115685520071826,
"grad_norm": 0.19160696864128113,
"learning_rate": 8.300867897207903e-05,
"loss": 1.2762,
"step": 133
},
{
"epoch": 0.034372194433756574,
"grad_norm": 0.19468261301517487,
"learning_rate": 8.079882555319684e-05,
"loss": 1.1529,
"step": 134
},
{
"epoch": 0.03462870334744132,
"grad_norm": 0.19138556718826294,
"learning_rate": 7.860789104443896e-05,
"loss": 1.2395,
"step": 135
},
{
"epoch": 0.03488521226112607,
"grad_norm": 0.19424627721309662,
"learning_rate": 7.643647442542382e-05,
"loss": 1.2203,
"step": 136
},
{
"epoch": 0.035141721174810826,
"grad_norm": 0.20424920320510864,
"learning_rate": 7.428516933977347e-05,
"loss": 1.2691,
"step": 137
},
{
"epoch": 0.035398230088495575,
"grad_norm": 0.20350797474384308,
"learning_rate": 7.215456393281776e-05,
"loss": 1.2016,
"step": 138
},
{
"epoch": 0.03565473900218032,
"grad_norm": 0.23430098593235016,
"learning_rate": 7.004524069080096e-05,
"loss": 1.1659,
"step": 139
},
{
"epoch": 0.03591124791586508,
"grad_norm": 0.35070452094078064,
"learning_rate": 6.795777628163599e-05,
"loss": 1.3081,
"step": 140
},
{
"epoch": 0.03616775682954983,
"grad_norm": 0.24181930720806122,
"learning_rate": 6.58927413972491e-05,
"loss": 1.2101,
"step": 141
},
{
"epoch": 0.036424265743234575,
"grad_norm": 0.23743180930614471,
"learning_rate": 6.385070059755846e-05,
"loss": 1.1793,
"step": 142
},
{
"epoch": 0.03668077465691933,
"grad_norm": 0.23980997502803802,
"learning_rate": 6.183221215612904e-05,
"loss": 1.0602,
"step": 143
},
{
"epoch": 0.03693728357060408,
"grad_norm": 0.29763689637184143,
"learning_rate": 5.983782790754623e-05,
"loss": 1.1392,
"step": 144
},
{
"epoch": 0.03719379248428883,
"grad_norm": 0.3883207142353058,
"learning_rate": 5.786809309654982e-05,
"loss": 1.0695,
"step": 145
},
{
"epoch": 0.03745030139797358,
"grad_norm": 0.4295353591442108,
"learning_rate": 5.592354622896944e-05,
"loss": 1.2206,
"step": 146
},
{
"epoch": 0.03770681031165833,
"grad_norm": 1.545823574066162,
"learning_rate": 5.40047189245025e-05,
"loss": 1.3174,
"step": 147
},
{
"epoch": 0.03796331922534308,
"grad_norm": 1.690621018409729,
"learning_rate": 5.211213577137469e-05,
"loss": 1.2853,
"step": 148
},
{
"epoch": 0.03821982813902783,
"grad_norm": 0.4993475675582886,
"learning_rate": 5.024631418292274e-05,
"loss": 1.2542,
"step": 149
},
{
"epoch": 0.038476337052712584,
"grad_norm": 0.9557994604110718,
"learning_rate": 4.840776425613886e-05,
"loss": 1.4121,
"step": 150
},
{
"epoch": 0.038476337052712584,
"eval_loss": 0.2887627184391022,
"eval_runtime": 31.8449,
"eval_samples_per_second": 1.57,
"eval_steps_per_second": 0.22,
"step": 150
},
{
"epoch": 0.03873284596639733,
"grad_norm": 0.31122446060180664,
"learning_rate": 4.659698863221513e-05,
"loss": 1.1079,
"step": 151
},
{
"epoch": 0.03898935488008208,
"grad_norm": 0.24545907974243164,
"learning_rate": 4.481448235912671e-05,
"loss": 1.0046,
"step": 152
},
{
"epoch": 0.039245863793766836,
"grad_norm": 0.20316024124622345,
"learning_rate": 4.306073275629044e-05,
"loss": 1.0567,
"step": 153
},
{
"epoch": 0.039502372707451584,
"grad_norm": 0.20005953311920166,
"learning_rate": 4.133621928133665e-05,
"loss": 1.1053,
"step": 154
},
{
"epoch": 0.03975888162113633,
"grad_norm": 0.1976904422044754,
"learning_rate": 3.964141339903026e-05,
"loss": 1.1163,
"step": 155
},
{
"epoch": 0.04001539053482109,
"grad_norm": 0.2005474865436554,
"learning_rate": 3.797677845237696e-05,
"loss": 1.068,
"step": 156
},
{
"epoch": 0.040271899448505837,
"grad_norm": 0.18155024945735931,
"learning_rate": 3.634276953594982e-05,
"loss": 1.1684,
"step": 157
},
{
"epoch": 0.040528408362190585,
"grad_norm": 0.17311017215251923,
"learning_rate": 3.473983337147118e-05,
"loss": 1.0116,
"step": 158
},
{
"epoch": 0.04078491727587533,
"grad_norm": 0.17773979902267456,
"learning_rate": 3.316840818568315e-05,
"loss": 1.0785,
"step": 159
},
{
"epoch": 0.04104142618956009,
"grad_norm": 0.2995525002479553,
"learning_rate": 3.162892359054098e-05,
"loss": 1.0934,
"step": 160
},
{
"epoch": 0.04129793510324484,
"grad_norm": 0.17463243007659912,
"learning_rate": 3.0121800465761293e-05,
"loss": 0.9919,
"step": 161
},
{
"epoch": 0.041554444016929586,
"grad_norm": 0.17306958138942719,
"learning_rate": 2.8647450843757897e-05,
"loss": 1.1307,
"step": 162
},
{
"epoch": 0.04181095293061434,
"grad_norm": 0.15881255269050598,
"learning_rate": 2.7206277796996144e-05,
"loss": 1.1525,
"step": 163
},
{
"epoch": 0.04206746184429909,
"grad_norm": 0.16048413515090942,
"learning_rate": 2.5798675327796993e-05,
"loss": 1.1592,
"step": 164
},
{
"epoch": 0.04232397075798384,
"grad_norm": 0.15379053354263306,
"learning_rate": 2.4425028260620715e-05,
"loss": 0.9624,
"step": 165
},
{
"epoch": 0.04258047967166859,
"grad_norm": 0.2737276554107666,
"learning_rate": 2.3085712136859668e-05,
"loss": 1.0579,
"step": 166
},
{
"epoch": 0.04283698858535334,
"grad_norm": 0.15662381052970886,
"learning_rate": 2.178109311216913e-05,
"loss": 1.0594,
"step": 167
},
{
"epoch": 0.04309349749903809,
"grad_norm": 0.16340816020965576,
"learning_rate": 2.0511527856363912e-05,
"loss": 1.0777,
"step": 168
},
{
"epoch": 0.043350006412722845,
"grad_norm": 0.16664321720600128,
"learning_rate": 1.927736345590839e-05,
"loss": 1.0744,
"step": 169
},
{
"epoch": 0.043606515326407594,
"grad_norm": 0.17165815830230713,
"learning_rate": 1.8078937319026654e-05,
"loss": 1.0481,
"step": 170
},
{
"epoch": 0.04386302424009234,
"grad_norm": 0.17665603756904602,
"learning_rate": 1.6916577083458228e-05,
"loss": 1.17,
"step": 171
},
{
"epoch": 0.04411953315377709,
"grad_norm": 0.15224668383598328,
"learning_rate": 1.579060052688548e-05,
"loss": 0.9989,
"step": 172
},
{
"epoch": 0.044376042067461846,
"grad_norm": 0.1608746498823166,
"learning_rate": 1.4701315480056164e-05,
"loss": 1.1762,
"step": 173
},
{
"epoch": 0.044632550981146595,
"grad_norm": 0.1899283528327942,
"learning_rate": 1.3649019742625623e-05,
"loss": 1.1426,
"step": 174
},
{
"epoch": 0.04488905989483134,
"grad_norm": 0.18082468211650848,
"learning_rate": 1.2634001001741373e-05,
"loss": 1.1745,
"step": 175
},
{
"epoch": 0.04488905989483134,
"eval_loss": 0.28639593720436096,
"eval_runtime": 31.8318,
"eval_samples_per_second": 1.571,
"eval_steps_per_second": 0.22,
"step": 175
},
{
"epoch": 0.0451455688085161,
"grad_norm": 0.19314950704574585,
"learning_rate": 1.1656536753392287e-05,
"loss": 1.2775,
"step": 176
},
{
"epoch": 0.04540207772220085,
"grad_norm": 0.18126244843006134,
"learning_rate": 1.0716894226543953e-05,
"loss": 1.0684,
"step": 177
},
{
"epoch": 0.045658586635885595,
"grad_norm": 0.18341222405433655,
"learning_rate": 9.815330310080887e-06,
"loss": 1.1358,
"step": 178
},
{
"epoch": 0.04591509554957035,
"grad_norm": 0.18251806497573853,
"learning_rate": 8.952091482575824e-06,
"loss": 1.2176,
"step": 179
},
{
"epoch": 0.0461716044632551,
"grad_norm": 0.1876961886882782,
"learning_rate": 8.127413744904804e-06,
"loss": 1.1623,
"step": 180
},
{
"epoch": 0.04642811337693985,
"grad_norm": 0.24478206038475037,
"learning_rate": 7.34152255572697e-06,
"loss": 1.3566,
"step": 181
},
{
"epoch": 0.046684622290624596,
"grad_norm": 0.22969405353069305,
"learning_rate": 6.594632769846353e-06,
"loss": 1.335,
"step": 182
},
{
"epoch": 0.04694113120430935,
"grad_norm": 1.3417153358459473,
"learning_rate": 5.886948579472778e-06,
"loss": 1.3391,
"step": 183
},
{
"epoch": 0.0471976401179941,
"grad_norm": 0.21031659841537476,
"learning_rate": 5.218663458397715e-06,
"loss": 1.214,
"step": 184
},
{
"epoch": 0.04745414903167885,
"grad_norm": 0.20800001919269562,
"learning_rate": 4.589960109100444e-06,
"loss": 1.2491,
"step": 185
},
{
"epoch": 0.0477106579453636,
"grad_norm": 0.2116893082857132,
"learning_rate": 4.001010412799138e-06,
"loss": 1.3338,
"step": 186
},
{
"epoch": 0.04796716685904835,
"grad_norm": 0.20268724858760834,
"learning_rate": 3.451975382460109e-06,
"loss": 1.1552,
"step": 187
},
{
"epoch": 0.0482236757727331,
"grad_norm": 0.21017323434352875,
"learning_rate": 2.9430051187785962e-06,
"loss": 1.1528,
"step": 188
},
{
"epoch": 0.048480184686417856,
"grad_norm": 0.23236685991287231,
"learning_rate": 2.4742387691426445e-06,
"loss": 1.2059,
"step": 189
},
{
"epoch": 0.048736693600102604,
"grad_norm": 0.26075395941734314,
"learning_rate": 2.0458044895916513e-06,
"loss": 1.2194,
"step": 190
},
{
"epoch": 0.04899320251378735,
"grad_norm": 0.263323575258255,
"learning_rate": 1.6578194097797258e-06,
"loss": 1.2614,
"step": 191
},
{
"epoch": 0.04924971142747211,
"grad_norm": 0.2640993297100067,
"learning_rate": 1.3103896009537207e-06,
"loss": 1.1803,
"step": 192
},
{
"epoch": 0.049506220341156856,
"grad_norm": 0.2626555263996124,
"learning_rate": 1.0036100469542786e-06,
"loss": 1.1416,
"step": 193
},
{
"epoch": 0.049762729254841605,
"grad_norm": 0.380075603723526,
"learning_rate": 7.375646182482875e-07,
"loss": 1.1872,
"step": 194
},
{
"epoch": 0.05001923816852635,
"grad_norm": 0.40151721239089966,
"learning_rate": 5.123260489995229e-07,
"loss": 1.2733,
"step": 195
},
{
"epoch": 0.05027574708221111,
"grad_norm": 0.546929657459259,
"learning_rate": 3.2795591718381975e-07,
"loss": 1.1326,
"step": 196
},
{
"epoch": 0.05053225599589586,
"grad_norm": 0.834646463394165,
"learning_rate": 1.8450462775428942e-07,
"loss": 0.945,
"step": 197
},
{
"epoch": 0.050788764909580605,
"grad_norm": 0.5064449906349182,
"learning_rate": 8.201139886109264e-08,
"loss": 1.1507,
"step": 198
},
{
"epoch": 0.05104527382326536,
"grad_norm": 0.6023350358009338,
"learning_rate": 2.0504251129649374e-08,
"loss": 1.3557,
"step": 199
},
{
"epoch": 0.05130178273695011,
"grad_norm": 0.7777907252311707,
"learning_rate": 0.0,
"loss": 1.4745,
"step": 200
},
{
"epoch": 0.05130178273695011,
"eval_loss": 0.2834896147251129,
"eval_runtime": 31.838,
"eval_samples_per_second": 1.57,
"eval_steps_per_second": 0.22,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.114751925878784e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
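
The JSON above follows the layout the Hugging Face Trainer writes for a checkpoint's state (log_history mixes per-step training records, which carry "loss", "grad_norm", and "learning_rate", with eval records emitted every eval_steps = 25 steps, which carry "eval_loss"). As a minimal sketch of how to inspect it, the snippet below assumes the JSON has been saved locally as trainer_state.json; that filename is the usual Trainer convention and is an assumption here, since the page itself does not show it.

# Minimal sketch: load the checkpoint state shown above and summarize it.
# Assumes the JSON was saved locally as "trainer_state.json" (filename assumed).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training logs carry "loss"; periodic eval logs carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Recover the best evaluation point and check it against the recorded best_metric.
best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"training steps logged: {len(train_logs)}")
print(f"eval points at steps: {[e['step'] for e in eval_logs]}")
print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']}")
print(f"matches best_metric: {best['eval_loss'] == state['best_metric']}")

For this file the best eval point should come out as step 200 (eval_loss 0.2835), matching best_metric and best_model_checkpoint. The EarlyStoppingCallback block at the bottom (patience 1, threshold 0.0) corresponds to transformers.EarlyStoppingCallback(early_stopping_patience=1, early_stopping_threshold=0.0); since its patience counter is still 0 and global_step equals max_steps, the run appears to have stopped because it reached max_steps = 200 rather than because early stopping fired.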