{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.49961449498843485,
"eval_steps": 500,
"global_step": 324,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0015420200462606013,
"grad_norm": 0.06960189342498779,
"learning_rate": 1e-05,
"loss": 0.9546,
"step": 1
},
{
"epoch": 0.0030840400925212026,
"grad_norm": 0.07705054432153702,
"learning_rate": 2e-05,
"loss": 1.0023,
"step": 2
},
{
"epoch": 0.004626060138781804,
"grad_norm": 0.07115544378757477,
"learning_rate": 3e-05,
"loss": 0.9733,
"step": 3
},
{
"epoch": 0.006168080185042405,
"grad_norm": 0.06770255416631699,
"learning_rate": 4e-05,
"loss": 0.8908,
"step": 4
},
{
"epoch": 0.007710100231303007,
"grad_norm": 0.06454406678676605,
"learning_rate": 5e-05,
"loss": 0.9168,
"step": 5
},
{
"epoch": 0.009252120277563608,
"grad_norm": 0.0818350687623024,
"learning_rate": 6e-05,
"loss": 0.8822,
"step": 6
},
{
"epoch": 0.01079414032382421,
"grad_norm": 0.08504347503185272,
"learning_rate": 7e-05,
"loss": 0.9563,
"step": 7
},
{
"epoch": 0.01233616037008481,
"grad_norm": 0.08642569184303284,
"learning_rate": 8e-05,
"loss": 0.9324,
"step": 8
},
{
"epoch": 0.013878180416345412,
"grad_norm": 0.07200746238231659,
"learning_rate": 9e-05,
"loss": 0.8557,
"step": 9
},
{
"epoch": 0.015420200462606014,
"grad_norm": 0.0539001002907753,
"learning_rate": 0.0001,
"loss": 0.8316,
"step": 10
},
{
"epoch": 0.016962220508866616,
"grad_norm": 0.05532313138246536,
"learning_rate": 0.00011000000000000002,
"loss": 0.869,
"step": 11
},
{
"epoch": 0.018504240555127217,
"grad_norm": 0.04828835651278496,
"learning_rate": 0.00012,
"loss": 0.9004,
"step": 12
},
{
"epoch": 0.020046260601387818,
"grad_norm": 0.051899779587984085,
"learning_rate": 0.00013000000000000002,
"loss": 0.7818,
"step": 13
},
{
"epoch": 0.02158828064764842,
"grad_norm": 0.07032614946365356,
"learning_rate": 0.00014,
"loss": 0.7946,
"step": 14
},
{
"epoch": 0.02313030069390902,
"grad_norm": 0.08230099081993103,
"learning_rate": 0.00015000000000000001,
"loss": 0.934,
"step": 15
},
{
"epoch": 0.02467232074016962,
"grad_norm": 0.08811169862747192,
"learning_rate": 0.00016,
"loss": 0.8621,
"step": 16
},
{
"epoch": 0.026214340786430222,
"grad_norm": 0.062236446887254715,
"learning_rate": 0.00017,
"loss": 0.8186,
"step": 17
},
{
"epoch": 0.027756360832690823,
"grad_norm": 0.052294306457042694,
"learning_rate": 0.00018,
"loss": 0.8124,
"step": 18
},
{
"epoch": 0.029298380878951428,
"grad_norm": 0.032340776175260544,
"learning_rate": 0.00019,
"loss": 0.8598,
"step": 19
},
{
"epoch": 0.03084040092521203,
"grad_norm": 0.028073711320757866,
"learning_rate": 0.0002,
"loss": 0.9087,
"step": 20
},
{
"epoch": 0.03238242097147263,
"grad_norm": 0.027820633724331856,
"learning_rate": 0.00019999969691239107,
"loss": 0.7093,
"step": 21
},
{
"epoch": 0.03392444101773323,
"grad_norm": 0.032430652529001236,
"learning_rate": 0.00019999878765140146,
"loss": 0.7825,
"step": 22
},
{
"epoch": 0.03546646106399383,
"grad_norm": 0.034685954451560974,
"learning_rate": 0.000199997272222543,
"loss": 0.7787,
"step": 23
},
{
"epoch": 0.03700848111025443,
"grad_norm": 0.033446088433265686,
"learning_rate": 0.0001999951506350017,
"loss": 0.8636,
"step": 24
},
{
"epoch": 0.03855050115651504,
"grad_norm": 0.026805371046066284,
"learning_rate": 0.0001999924229016382,
"loss": 0.904,
"step": 25
},
{
"epoch": 0.040092521202775636,
"grad_norm": 0.024178853258490562,
"learning_rate": 0.0001999890890389873,
"loss": 0.7428,
"step": 26
},
{
"epoch": 0.04163454124903624,
"grad_norm": 0.024074744433164597,
"learning_rate": 0.00019998514906725804,
"loss": 0.8562,
"step": 27
},
{
"epoch": 0.04317656129529684,
"grad_norm": 0.025082379579544067,
"learning_rate": 0.0001999806030103336,
"loss": 0.8078,
"step": 28
},
{
"epoch": 0.04471858134155744,
"grad_norm": 0.027726231142878532,
"learning_rate": 0.00019997545089577102,
"loss": 0.7133,
"step": 29
},
{
"epoch": 0.04626060138781804,
"grad_norm": 0.02736576274037361,
"learning_rate": 0.00019996969275480115,
"loss": 0.7826,
"step": 30
},
{
"epoch": 0.047802621434078645,
"grad_norm": 0.0234544537961483,
"learning_rate": 0.0001999633286223284,
"loss": 0.7424,
"step": 31
},
{
"epoch": 0.04934464148033924,
"grad_norm": 0.025730784982442856,
"learning_rate": 0.00019995635853693058,
"loss": 0.7584,
"step": 32
},
{
"epoch": 0.05088666152659985,
"grad_norm": 0.02729860506951809,
"learning_rate": 0.00019994878254085861,
"loss": 0.73,
"step": 33
},
{
"epoch": 0.052428681572860444,
"grad_norm": 0.027428491041064262,
"learning_rate": 0.00019994060068003627,
"loss": 0.854,
"step": 34
},
{
"epoch": 0.05397070161912105,
"grad_norm": 0.024362564086914062,
"learning_rate": 0.00019993181300406005,
"loss": 0.7441,
"step": 35
},
{
"epoch": 0.05551272166538165,
"grad_norm": 0.024825185537338257,
"learning_rate": 0.00019992241956619863,
"loss": 0.7367,
"step": 36
},
{
"epoch": 0.05705474171164225,
"grad_norm": 0.023908289149403572,
"learning_rate": 0.00019991242042339264,
"loss": 0.8266,
"step": 37
},
{
"epoch": 0.058596761757902856,
"grad_norm": 0.023762725293636322,
"learning_rate": 0.00019990181563625447,
"loss": 0.829,
"step": 38
},
{
"epoch": 0.06013878180416345,
"grad_norm": 0.02654297836124897,
"learning_rate": 0.0001998906052690677,
"loss": 0.7635,
"step": 39
},
{
"epoch": 0.06168080185042406,
"grad_norm": 0.026318082585930824,
"learning_rate": 0.00019987878938978684,
"loss": 0.7623,
"step": 40
},
{
"epoch": 0.06322282189668466,
"grad_norm": 0.024309197440743446,
"learning_rate": 0.00019986636807003673,
"loss": 0.7494,
"step": 41
},
{
"epoch": 0.06476484194294525,
"grad_norm": 0.022980719804763794,
"learning_rate": 0.00019985334138511237,
"loss": 0.748,
"step": 42
},
{
"epoch": 0.06630686198920586,
"grad_norm": 0.023796193301677704,
"learning_rate": 0.00019983970941397835,
"loss": 0.8684,
"step": 43
},
{
"epoch": 0.06784888203546646,
"grad_norm": 0.02435590885579586,
"learning_rate": 0.00019982547223926824,
"loss": 0.6749,
"step": 44
},
{
"epoch": 0.06939090208172706,
"grad_norm": 0.026482658460736275,
"learning_rate": 0.0001998106299472843,
"loss": 0.7684,
"step": 45
},
{
"epoch": 0.07093292212798766,
"grad_norm": 0.025558117777109146,
"learning_rate": 0.0001997951826279968,
"loss": 0.7507,
"step": 46
},
{
"epoch": 0.07247494217424827,
"grad_norm": 0.027371902018785477,
"learning_rate": 0.00019977913037504355,
"loss": 0.7377,
"step": 47
},
{
"epoch": 0.07401696222050887,
"grad_norm": 0.0234097708016634,
"learning_rate": 0.00019976247328572938,
"loss": 0.7675,
"step": 48
},
{
"epoch": 0.07555898226676946,
"grad_norm": 0.0241215992718935,
"learning_rate": 0.00019974521146102537,
"loss": 0.8079,
"step": 49
},
{
"epoch": 0.07710100231303008,
"grad_norm": 0.025045258924365044,
"learning_rate": 0.00019972734500556846,
"loss": 0.8407,
"step": 50
},
{
"epoch": 0.07864302235929067,
"grad_norm": 0.02574036829173565,
"learning_rate": 0.0001997088740276607,
"loss": 0.767,
"step": 51
},
{
"epoch": 0.08018504240555127,
"grad_norm": 0.02342085726559162,
"learning_rate": 0.00019968979863926856,
"loss": 0.7373,
"step": 52
},
{
"epoch": 0.08172706245181187,
"grad_norm": 0.023467406630516052,
"learning_rate": 0.0001996701189560223,
"loss": 0.6529,
"step": 53
},
{
"epoch": 0.08326908249807248,
"grad_norm": 0.02330499142408371,
"learning_rate": 0.00019964983509721527,
"loss": 0.7347,
"step": 54
},
{
"epoch": 0.08481110254433308,
"grad_norm": 0.027940964326262474,
"learning_rate": 0.00019962894718580324,
"loss": 0.9313,
"step": 55
},
{
"epoch": 0.08635312259059368,
"grad_norm": 0.024525761604309082,
"learning_rate": 0.00019960745534840354,
"loss": 0.767,
"step": 56
},
{
"epoch": 0.08789514263685427,
"grad_norm": 0.024538526311516762,
"learning_rate": 0.00019958535971529434,
"loss": 0.7659,
"step": 57
},
{
"epoch": 0.08943716268311488,
"grad_norm": 0.02253701537847519,
"learning_rate": 0.00019956266042041394,
"loss": 0.7805,
"step": 58
},
{
"epoch": 0.09097918272937548,
"grad_norm": 0.023676637560129166,
"learning_rate": 0.0001995393576013598,
"loss": 0.7894,
"step": 59
},
{
"epoch": 0.09252120277563608,
"grad_norm": 0.022134529426693916,
"learning_rate": 0.0001995154513993878,
"loss": 0.7492,
"step": 60
},
{
"epoch": 0.09406322282189669,
"grad_norm": 0.02350509911775589,
"learning_rate": 0.00019949094195941152,
"loss": 0.6902,
"step": 61
},
{
"epoch": 0.09560524286815729,
"grad_norm": 0.02464171312749386,
"learning_rate": 0.00019946582943000102,
"loss": 0.7836,
"step": 62
},
{
"epoch": 0.09714726291441789,
"grad_norm": 0.023095758631825447,
"learning_rate": 0.00019944011396338222,
"loss": 0.8321,
"step": 63
},
{
"epoch": 0.09868928296067848,
"grad_norm": 0.026240425184369087,
"learning_rate": 0.00019941379571543596,
"loss": 0.8461,
"step": 64
},
{
"epoch": 0.1002313030069391,
"grad_norm": 0.02174345962703228,
"learning_rate": 0.00019938687484569693,
"loss": 0.6388,
"step": 65
},
{
"epoch": 0.1017733230531997,
"grad_norm": 0.02867325395345688,
"learning_rate": 0.00019935935151735277,
"loss": 0.826,
"step": 66
},
{
"epoch": 0.10331534309946029,
"grad_norm": 0.02631618268787861,
"learning_rate": 0.00019933122589724302,
"loss": 0.9407,
"step": 67
},
{
"epoch": 0.10485736314572089,
"grad_norm": 0.022020747885107994,
"learning_rate": 0.0001993024981558583,
"loss": 0.7004,
"step": 68
},
{
"epoch": 0.1063993831919815,
"grad_norm": 0.02743780054152012,
"learning_rate": 0.000199273168467339,
"loss": 0.7607,
"step": 69
},
{
"epoch": 0.1079414032382421,
"grad_norm": 0.028378015384078026,
"learning_rate": 0.00019924323700947448,
"loss": 0.8604,
"step": 70
},
{
"epoch": 0.1094834232845027,
"grad_norm": 0.0275627039372921,
"learning_rate": 0.00019921270396370172,
"loss": 0.835,
"step": 71
},
{
"epoch": 0.1110254433307633,
"grad_norm": 0.023099975660443306,
"learning_rate": 0.0001991815695151046,
"loss": 0.7028,
"step": 72
},
{
"epoch": 0.1125674633770239,
"grad_norm": 0.028545403853058815,
"learning_rate": 0.00019914983385241236,
"loss": 0.8248,
"step": 73
},
{
"epoch": 0.1141094834232845,
"grad_norm": 0.02746577188372612,
"learning_rate": 0.00019911749716799873,
"loss": 0.7309,
"step": 74
},
{
"epoch": 0.1156515034695451,
"grad_norm": 0.024899670854210854,
"learning_rate": 0.00019908455965788067,
"loss": 0.7473,
"step": 75
},
{
"epoch": 0.11719352351580571,
"grad_norm": 0.024973087012767792,
"learning_rate": 0.00019905102152171727,
"loss": 0.8362,
"step": 76
},
{
"epoch": 0.11873554356206631,
"grad_norm": 0.023668723180890083,
"learning_rate": 0.0001990168829628083,
"loss": 0.7677,
"step": 77
},
{
"epoch": 0.1202775636083269,
"grad_norm": 0.02495860867202282,
"learning_rate": 0.0001989821441880933,
"loss": 0.7341,
"step": 78
},
{
"epoch": 0.1218195836545875,
"grad_norm": 0.02537156455218792,
"learning_rate": 0.00019894680540815006,
"loss": 0.6767,
"step": 79
},
{
"epoch": 0.12336160370084812,
"grad_norm": 0.0246786717325449,
"learning_rate": 0.0001989108668371936,
"loss": 0.7959,
"step": 80
},
{
"epoch": 0.12490362374710871,
"grad_norm": 0.02471376582980156,
"learning_rate": 0.00019887432869307458,
"loss": 0.6787,
"step": 81
},
{
"epoch": 0.1264456437933693,
"grad_norm": 0.025275586172938347,
"learning_rate": 0.00019883719119727816,
"loss": 0.7753,
"step": 82
},
{
"epoch": 0.12798766383962992,
"grad_norm": 0.021094506606459618,
"learning_rate": 0.00019879945457492267,
"loss": 0.758,
"step": 83
},
{
"epoch": 0.1295296838858905,
"grad_norm": 0.02534683421254158,
"learning_rate": 0.00019876111905475815,
"loss": 0.818,
"step": 84
},
{
"epoch": 0.13107170393215112,
"grad_norm": 0.024033140391111374,
"learning_rate": 0.00019872218486916498,
"loss": 0.775,
"step": 85
},
{
"epoch": 0.13261372397841173,
"grad_norm": 0.023884933441877365,
"learning_rate": 0.00019868265225415265,
"loss": 0.7918,
"step": 86
},
{
"epoch": 0.1341557440246723,
"grad_norm": 0.026067111641168594,
"learning_rate": 0.00019864252144935794,
"loss": 0.8368,
"step": 87
},
{
"epoch": 0.13569776407093292,
"grad_norm": 0.02631264552474022,
"learning_rate": 0.00019860179269804394,
"loss": 0.7138,
"step": 88
},
{
"epoch": 0.13723978411719354,
"grad_norm": 0.022870918735861778,
"learning_rate": 0.00019856046624709822,
"loss": 0.7423,
"step": 89
},
{
"epoch": 0.13878180416345412,
"grad_norm": 0.02922765538096428,
"learning_rate": 0.00019851854234703145,
"loss": 0.7498,
"step": 90
},
{
"epoch": 0.14032382420971473,
"grad_norm": 0.02589617855846882,
"learning_rate": 0.00019847602125197598,
"loss": 0.7438,
"step": 91
},
{
"epoch": 0.14186584425597532,
"grad_norm": 0.02394738420844078,
"learning_rate": 0.00019843290321968412,
"loss": 0.7094,
"step": 92
},
{
"epoch": 0.14340786430223593,
"grad_norm": 0.02237016148865223,
"learning_rate": 0.0001983891885115267,
"loss": 0.7868,
"step": 93
},
{
"epoch": 0.14494988434849654,
"grad_norm": 0.028733767569065094,
"learning_rate": 0.00019834487739249146,
"loss": 0.8178,
"step": 94
},
{
"epoch": 0.14649190439475712,
"grad_norm": 0.023086342960596085,
"learning_rate": 0.0001982999701311814,
"loss": 0.8368,
"step": 95
},
{
"epoch": 0.14803392444101773,
"grad_norm": 0.025624489411711693,
"learning_rate": 0.0001982544669998132,
"loss": 0.7731,
"step": 96
},
{
"epoch": 0.14957594448727835,
"grad_norm": 0.028302457183599472,
"learning_rate": 0.0001982083682742156,
"loss": 0.7942,
"step": 97
},
{
"epoch": 0.15111796453353893,
"grad_norm": 0.025008324533700943,
"learning_rate": 0.00019816167423382765,
"loss": 0.7393,
"step": 98
},
{
"epoch": 0.15265998457979954,
"grad_norm": 0.026291735470294952,
"learning_rate": 0.00019811438516169702,
"loss": 0.8016,
"step": 99
},
{
"epoch": 0.15420200462606015,
"grad_norm": 0.031547173857688904,
"learning_rate": 0.00019806650134447838,
"loss": 0.8597,
"step": 100
},
{
"epoch": 0.15574402467232074,
"grad_norm": 0.024978285655379295,
"learning_rate": 0.00019801802307243153,
"loss": 0.8182,
"step": 101
},
{
"epoch": 0.15728604471858135,
"grad_norm": 0.023977672681212425,
"learning_rate": 0.00019796895063941978,
"loss": 0.8374,
"step": 102
},
{
"epoch": 0.15882806476484193,
"grad_norm": 0.025743963196873665,
"learning_rate": 0.000197919284342908,
"loss": 0.7326,
"step": 103
},
{
"epoch": 0.16037008481110254,
"grad_norm": 0.02554011158645153,
"learning_rate": 0.00019786902448396104,
"loss": 0.7703,
"step": 104
},
{
"epoch": 0.16191210485736315,
"grad_norm": 0.027971483767032623,
"learning_rate": 0.00019781817136724165,
"loss": 0.6845,
"step": 105
},
{
"epoch": 0.16345412490362374,
"grad_norm": 0.02789183147251606,
"learning_rate": 0.00019776672530100886,
"loss": 0.838,
"step": 106
},
{
"epoch": 0.16499614494988435,
"grad_norm": 0.029534442350268364,
"learning_rate": 0.00019771468659711595,
"loss": 0.7428,
"step": 107
},
{
"epoch": 0.16653816499614496,
"grad_norm": 0.02910265140235424,
"learning_rate": 0.00019766205557100868,
"loss": 0.8891,
"step": 108
},
{
"epoch": 0.16808018504240554,
"grad_norm": 0.02285209856927395,
"learning_rate": 0.00019760883254172327,
"loss": 0.7425,
"step": 109
},
{
"epoch": 0.16962220508866616,
"grad_norm": 0.025452135130763054,
"learning_rate": 0.0001975550178318845,
"loss": 0.7617,
"step": 110
},
{
"epoch": 0.17116422513492677,
"grad_norm": 0.02843882516026497,
"learning_rate": 0.00019750061176770385,
"loss": 0.9045,
"step": 111
},
{
"epoch": 0.17270624518118735,
"grad_norm": 0.026800749823451042,
"learning_rate": 0.00019744561467897735,
"loss": 0.72,
"step": 112
},
{
"epoch": 0.17424826522744796,
"grad_norm": 0.021131988614797592,
"learning_rate": 0.00019739002689908377,
"loss": 0.6511,
"step": 113
},
{
"epoch": 0.17579028527370855,
"grad_norm": 0.02588481456041336,
"learning_rate": 0.00019733384876498245,
"loss": 0.7168,
"step": 114
},
{
"epoch": 0.17733230531996916,
"grad_norm": 0.031303439289331436,
"learning_rate": 0.00019727708061721133,
"loss": 0.8685,
"step": 115
},
{
"epoch": 0.17887432536622977,
"grad_norm": 0.02867058850824833,
"learning_rate": 0.00019721972279988477,
"loss": 0.655,
"step": 116
},
{
"epoch": 0.18041634541249035,
"grad_norm": 0.03866586834192276,
"learning_rate": 0.00019716177566069174,
"loss": 0.7957,
"step": 117
},
{
"epoch": 0.18195836545875096,
"grad_norm": 0.029205329716205597,
"learning_rate": 0.00019710323955089343,
"loss": 0.7617,
"step": 118
},
{
"epoch": 0.18350038550501158,
"grad_norm": 0.024928180500864983,
"learning_rate": 0.00019704411482532116,
"loss": 0.6982,
"step": 119
},
{
"epoch": 0.18504240555127216,
"grad_norm": 0.02545573003590107,
"learning_rate": 0.0001969844018423744,
"loss": 0.6067,
"step": 120
},
{
"epoch": 0.18658442559753277,
"grad_norm": 0.02810928039252758,
"learning_rate": 0.0001969241009640185,
"loss": 0.7112,
"step": 121
},
{
"epoch": 0.18812644564379338,
"grad_norm": 0.022296108305454254,
"learning_rate": 0.00019686321255578238,
"loss": 0.6598,
"step": 122
},
{
"epoch": 0.18966846569005397,
"grad_norm": 0.02429027482867241,
"learning_rate": 0.00019680173698675648,
"loss": 0.6381,
"step": 123
},
{
"epoch": 0.19121048573631458,
"grad_norm": 0.027076730504631996,
"learning_rate": 0.0001967396746295905,
"loss": 0.717,
"step": 124
},
{
"epoch": 0.19275250578257516,
"grad_norm": 0.02401566132903099,
"learning_rate": 0.00019667702586049108,
"loss": 0.8002,
"step": 125
},
{
"epoch": 0.19429452582883577,
"grad_norm": 0.024678878486156464,
"learning_rate": 0.00019661379105921948,
"loss": 0.7834,
"step": 126
},
{
"epoch": 0.19583654587509639,
"grad_norm": 0.029240388423204422,
"learning_rate": 0.00019654997060908946,
"loss": 0.8793,
"step": 127
},
{
"epoch": 0.19737856592135697,
"grad_norm": 0.02550147846341133,
"learning_rate": 0.0001964855648969647,
"loss": 0.6742,
"step": 128
},
{
"epoch": 0.19892058596761758,
"grad_norm": 0.02416900172829628,
"learning_rate": 0.00019642057431325672,
"loss": 0.7728,
"step": 129
},
{
"epoch": 0.2004626060138782,
"grad_norm": 0.024728331714868546,
"learning_rate": 0.0001963549992519223,
"loss": 0.7237,
"step": 130
},
{
"epoch": 0.20200462606013878,
"grad_norm": 0.025203561410307884,
"learning_rate": 0.00019628884011046123,
"loss": 0.7491,
"step": 131
},
{
"epoch": 0.2035466461063994,
"grad_norm": 0.02104656957089901,
"learning_rate": 0.00019622209728991383,
"loss": 0.7324,
"step": 132
},
{
"epoch": 0.20508866615265997,
"grad_norm": 0.03518475592136383,
"learning_rate": 0.00019615477119485855,
"loss": 0.8982,
"step": 133
},
{
"epoch": 0.20663068619892058,
"grad_norm": 0.026010941714048386,
"learning_rate": 0.00019608686223340945,
"loss": 0.7451,
"step": 134
},
{
"epoch": 0.2081727062451812,
"grad_norm": 0.025345437228679657,
"learning_rate": 0.00019601837081721386,
"loss": 0.705,
"step": 135
},
{
"epoch": 0.20971472629144178,
"grad_norm": 0.02374056540429592,
"learning_rate": 0.00019594929736144976,
"loss": 0.7307,
"step": 136
},
{
"epoch": 0.2112567463377024,
"grad_norm": 0.026990080252289772,
"learning_rate": 0.00019587964228482332,
"loss": 0.8173,
"step": 137
},
{
"epoch": 0.212798766383963,
"grad_norm": 0.026662928983569145,
"learning_rate": 0.00019580940600956638,
"loss": 0.818,
"step": 138
},
{
"epoch": 0.21434078643022358,
"grad_norm": 0.026762284338474274,
"learning_rate": 0.00019573858896143376,
"loss": 0.6674,
"step": 139
},
{
"epoch": 0.2158828064764842,
"grad_norm": 0.02683679386973381,
"learning_rate": 0.00019566719156970095,
"loss": 0.7569,
"step": 140
},
{
"epoch": 0.2174248265227448,
"grad_norm": 0.022990800440311432,
"learning_rate": 0.00019559521426716118,
"loss": 0.7051,
"step": 141
},
{
"epoch": 0.2189668465690054,
"grad_norm": 0.022913858294487,
"learning_rate": 0.00019552265749012303,
"loss": 0.7483,
"step": 142
},
{
"epoch": 0.220508866615266,
"grad_norm": 0.025732524693012238,
"learning_rate": 0.00019544952167840777,
"loss": 0.8484,
"step": 143
},
{
"epoch": 0.2220508866615266,
"grad_norm": 0.024254556745290756,
"learning_rate": 0.00019537580727534644,
"loss": 0.747,
"step": 144
},
{
"epoch": 0.2235929067077872,
"grad_norm": 0.02940620854496956,
"learning_rate": 0.0001953015147277776,
"loss": 0.8204,
"step": 145
},
{
"epoch": 0.2251349267540478,
"grad_norm": 0.02590208128094673,
"learning_rate": 0.00019522664448604418,
"loss": 0.7778,
"step": 146
},
{
"epoch": 0.2266769468003084,
"grad_norm": 0.02748725563287735,
"learning_rate": 0.00019515119700399107,
"loss": 0.7782,
"step": 147
},
{
"epoch": 0.228218966846569,
"grad_norm": 0.024179786443710327,
"learning_rate": 0.00019507517273896222,
"loss": 0.7651,
"step": 148
},
{
"epoch": 0.22976098689282962,
"grad_norm": 0.02463974617421627,
"learning_rate": 0.00019499857215179786,
"loss": 0.8215,
"step": 149
},
{
"epoch": 0.2313030069390902,
"grad_norm": 0.024559814482927322,
"learning_rate": 0.00019492139570683178,
"loss": 0.7539,
"step": 150
},
{
"epoch": 0.2328450269853508,
"grad_norm": 0.023420870304107666,
"learning_rate": 0.00019484364387188847,
"loss": 0.7035,
"step": 151
},
{
"epoch": 0.23438704703161142,
"grad_norm": 0.026096729561686516,
"learning_rate": 0.00019476531711828027,
"loss": 0.8033,
"step": 152
},
{
"epoch": 0.235929067077872,
"grad_norm": 0.02388446033000946,
"learning_rate": 0.0001946864159208045,
"loss": 0.6746,
"step": 153
},
{
"epoch": 0.23747108712413262,
"grad_norm": 0.02306438237428665,
"learning_rate": 0.0001946069407577408,
"loss": 0.7062,
"step": 154
},
{
"epoch": 0.2390131071703932,
"grad_norm": 0.024697955697774887,
"learning_rate": 0.00019452689211084775,
"loss": 0.7691,
"step": 155
},
{
"epoch": 0.2405551272166538,
"grad_norm": 0.026947690173983574,
"learning_rate": 0.00019444627046536056,
"loss": 0.7347,
"step": 156
},
{
"epoch": 0.24209714726291442,
"grad_norm": 0.02345297671854496,
"learning_rate": 0.00019436507630998757,
"loss": 0.745,
"step": 157
},
{
"epoch": 0.243639167309175,
"grad_norm": 0.029198188334703445,
"learning_rate": 0.00019428331013690765,
"loss": 0.7862,
"step": 158
},
{
"epoch": 0.24518118735543562,
"grad_norm": 0.025465266779065132,
"learning_rate": 0.00019420097244176706,
"loss": 0.6685,
"step": 159
},
{
"epoch": 0.24672320740169623,
"grad_norm": 0.02786502055823803,
"learning_rate": 0.00019411806372367655,
"loss": 0.725,
"step": 160
},
{
"epoch": 0.24826522744795682,
"grad_norm": 0.02317357063293457,
"learning_rate": 0.0001940345844852082,
"loss": 0.7075,
"step": 161
},
{
"epoch": 0.24980724749421743,
"grad_norm": 0.02511444129049778,
"learning_rate": 0.00019395053523239245,
"loss": 0.7102,
"step": 162
},
{
"epoch": 0.25134926754047804,
"grad_norm": 0.02567203901708126,
"learning_rate": 0.00019386591647471506,
"loss": 0.8113,
"step": 163
},
{
"epoch": 0.2528912875867386,
"grad_norm": 0.02611825056374073,
"learning_rate": 0.00019378072872511398,
"loss": 0.786,
"step": 164
},
{
"epoch": 0.2544333076329992,
"grad_norm": 0.02788010984659195,
"learning_rate": 0.0001936949724999762,
"loss": 0.684,
"step": 165
},
{
"epoch": 0.25597532767925985,
"grad_norm": 0.026200013235211372,
"learning_rate": 0.0001936086483191347,
"loss": 0.7563,
"step": 166
},
{
"epoch": 0.25751734772552043,
"grad_norm": 0.024887658655643463,
"learning_rate": 0.00019352175670586533,
"loss": 0.6896,
"step": 167
},
{
"epoch": 0.259059367771781,
"grad_norm": 0.027471961453557014,
"learning_rate": 0.00019343429818688347,
"loss": 0.8109,
"step": 168
},
{
"epoch": 0.26060138781804165,
"grad_norm": 0.024350160732865334,
"learning_rate": 0.00019334627329234102,
"loss": 0.7178,
"step": 169
},
{
"epoch": 0.26214340786430224,
"grad_norm": 0.02346990443766117,
"learning_rate": 0.00019325768255582302,
"loss": 0.6508,
"step": 170
},
{
"epoch": 0.2636854279105628,
"grad_norm": 0.028655072674155235,
"learning_rate": 0.00019316852651434462,
"loss": 0.7036,
"step": 171
},
{
"epoch": 0.26522744795682346,
"grad_norm": 0.024629781022667885,
"learning_rate": 0.0001930788057083476,
"loss": 0.774,
"step": 172
},
{
"epoch": 0.26676946800308404,
"grad_norm": 0.023107299581170082,
"learning_rate": 0.0001929885206816973,
"loss": 0.7021,
"step": 173
},
{
"epoch": 0.2683114880493446,
"grad_norm": 0.022794177755713463,
"learning_rate": 0.00019289767198167916,
"loss": 0.7469,
"step": 174
},
{
"epoch": 0.26985350809560527,
"grad_norm": 0.026097161695361137,
"learning_rate": 0.00019280626015899546,
"loss": 0.7325,
"step": 175
},
{
"epoch": 0.27139552814186585,
"grad_norm": 0.029879910871386528,
"learning_rate": 0.00019271428576776205,
"loss": 0.7614,
"step": 176
},
{
"epoch": 0.27293754818812643,
"grad_norm": 0.026986606419086456,
"learning_rate": 0.00019262174936550487,
"loss": 0.7718,
"step": 177
},
{
"epoch": 0.2744795682343871,
"grad_norm": 0.025835467502474785,
"learning_rate": 0.00019252865151315665,
"loss": 0.7511,
"step": 178
},
{
"epoch": 0.27602158828064766,
"grad_norm": 0.028101902455091476,
"learning_rate": 0.00019243499277505355,
"loss": 0.8136,
"step": 179
},
{
"epoch": 0.27756360832690824,
"grad_norm": 0.028153471648693085,
"learning_rate": 0.00019234077371893155,
"loss": 0.8798,
"step": 180
},
{
"epoch": 0.2791056283731688,
"grad_norm": 0.024540267884731293,
"learning_rate": 0.0001922459949159233,
"loss": 0.7854,
"step": 181
},
{
"epoch": 0.28064764841942946,
"grad_norm": 0.023485183715820312,
"learning_rate": 0.00019215065694055437,
"loss": 0.6655,
"step": 182
},
{
"epoch": 0.28218966846569005,
"grad_norm": 0.023394625633955002,
"learning_rate": 0.00019205476037073997,
"loss": 0.759,
"step": 183
},
{
"epoch": 0.28373168851195063,
"grad_norm": 0.025181008502840996,
"learning_rate": 0.00019195830578778132,
"loss": 0.7649,
"step": 184
},
{
"epoch": 0.28527370855821127,
"grad_norm": 0.023696815595030785,
"learning_rate": 0.0001918612937763622,
"loss": 0.7469,
"step": 185
},
{
"epoch": 0.28681572860447185,
"grad_norm": 0.025794658809900284,
"learning_rate": 0.00019176372492454537,
"loss": 0.7623,
"step": 186
},
{
"epoch": 0.28835774865073244,
"grad_norm": 0.02523699589073658,
"learning_rate": 0.00019166559982376904,
"loss": 0.6621,
"step": 187
},
{
"epoch": 0.2898997686969931,
"grad_norm": 0.02426300384104252,
"learning_rate": 0.00019156691906884325,
"loss": 0.748,
"step": 188
},
{
"epoch": 0.29144178874325366,
"grad_norm": 0.024227775633335114,
"learning_rate": 0.0001914676832579463,
"loss": 0.724,
"step": 189
},
{
"epoch": 0.29298380878951424,
"grad_norm": 0.031684551388025284,
"learning_rate": 0.00019136789299262108,
"loss": 0.8939,
"step": 190
},
{
"epoch": 0.2945258288357749,
"grad_norm": 0.023766616359353065,
"learning_rate": 0.0001912675488777714,
"loss": 0.7179,
"step": 191
},
{
"epoch": 0.29606784888203547,
"grad_norm": 0.02463400922715664,
"learning_rate": 0.0001911666515216585,
"loss": 0.8202,
"step": 192
},
{
"epoch": 0.29760986892829605,
"grad_norm": 0.0225905179977417,
"learning_rate": 0.00019106520153589708,
"loss": 0.6357,
"step": 193
},
{
"epoch": 0.2991518889745567,
"grad_norm": 0.022882292047142982,
"learning_rate": 0.00019096319953545185,
"loss": 0.7506,
"step": 194
},
{
"epoch": 0.3006939090208173,
"grad_norm": 0.027596216648817062,
"learning_rate": 0.00019086064613863364,
"loss": 0.7669,
"step": 195
},
{
"epoch": 0.30223592906707786,
"grad_norm": 0.030367175117135048,
"learning_rate": 0.00019075754196709572,
"loss": 0.7792,
"step": 196
},
{
"epoch": 0.3037779491133385,
"grad_norm": 0.025017013773322105,
"learning_rate": 0.00019065388764583004,
"loss": 0.6977,
"step": 197
},
{
"epoch": 0.3053199691595991,
"grad_norm": 0.02788584679365158,
"learning_rate": 0.0001905496838031634,
"loss": 0.6871,
"step": 198
},
{
"epoch": 0.30686198920585966,
"grad_norm": 0.032745130360126495,
"learning_rate": 0.00019044493107075368,
"loss": 0.8934,
"step": 199
},
{
"epoch": 0.3084040092521203,
"grad_norm": 0.027039945125579834,
"learning_rate": 0.00019033963008358598,
"loss": 0.6522,
"step": 200
},
{
"epoch": 0.3099460292983809,
"grad_norm": 0.03149978816509247,
"learning_rate": 0.0001902337814799688,
"loss": 0.844,
"step": 201
},
{
"epoch": 0.31148804934464147,
"grad_norm": 0.024369308724999428,
"learning_rate": 0.0001901273859015301,
"loss": 0.7202,
"step": 202
},
{
"epoch": 0.31303006939090205,
"grad_norm": 0.05448361113667488,
"learning_rate": 0.00019002044399321356,
"loss": 0.8301,
"step": 203
},
{
"epoch": 0.3145720894371627,
"grad_norm": 0.02388385497033596,
"learning_rate": 0.0001899129564032745,
"loss": 0.7105,
"step": 204
},
{
"epoch": 0.3161141094834233,
"grad_norm": 0.02488291636109352,
"learning_rate": 0.00018980492378327607,
"loss": 0.7393,
"step": 205
},
{
"epoch": 0.31765612952968386,
"grad_norm": 0.023874662816524506,
"learning_rate": 0.00018969634678808522,
"loss": 0.6791,
"step": 206
},
{
"epoch": 0.3191981495759445,
"grad_norm": 0.030418075621128082,
"learning_rate": 0.0001895872260758688,
"loss": 0.75,
"step": 207
},
{
"epoch": 0.3207401696222051,
"grad_norm": 0.02990088053047657,
"learning_rate": 0.00018947756230808954,
"loss": 0.7986,
"step": 208
},
{
"epoch": 0.32228218966846567,
"grad_norm": 0.027980022132396698,
"learning_rate": 0.00018936735614950197,
"loss": 0.7054,
"step": 209
},
{
"epoch": 0.3238242097147263,
"grad_norm": 0.026269223541021347,
"learning_rate": 0.00018925660826814856,
"loss": 0.8195,
"step": 210
},
{
"epoch": 0.3253662297609869,
"grad_norm": 0.025045178830623627,
"learning_rate": 0.0001891453193353555,
"loss": 0.7221,
"step": 211
},
{
"epoch": 0.3269082498072475,
"grad_norm": 0.021175356581807137,
"learning_rate": 0.00018903349002572873,
"loss": 0.6513,
"step": 212
},
{
"epoch": 0.3284502698535081,
"grad_norm": 0.023593388497829437,
"learning_rate": 0.0001889211210171498,
"loss": 0.7405,
"step": 213
},
{
"epoch": 0.3299922898997687,
"grad_norm": 0.027270464226603508,
"learning_rate": 0.00018880821299077183,
"loss": 0.7184,
"step": 214
},
{
"epoch": 0.3315343099460293,
"grad_norm": 0.025203121826052666,
"learning_rate": 0.00018869476663101523,
"loss": 0.6659,
"step": 215
},
{
"epoch": 0.3330763299922899,
"grad_norm": 0.024924185127019882,
"learning_rate": 0.0001885807826255638,
"loss": 0.7412,
"step": 216
},
{
"epoch": 0.3346183500385505,
"grad_norm": 0.022862501442432404,
"learning_rate": 0.00018846626166536026,
"loss": 0.6984,
"step": 217
},
{
"epoch": 0.3361603700848111,
"grad_norm": 0.022781461477279663,
"learning_rate": 0.0001883512044446023,
"loss": 0.6374,
"step": 218
},
{
"epoch": 0.33770239013107173,
"grad_norm": 0.023618346080183983,
"learning_rate": 0.0001882356116607383,
"loss": 0.6948,
"step": 219
},
{
"epoch": 0.3392444101773323,
"grad_norm": 0.02586747333407402,
"learning_rate": 0.0001881194840144631,
"loss": 0.7682,
"step": 220
},
{
"epoch": 0.3407864302235929,
"grad_norm": 0.026834698393940926,
"learning_rate": 0.00018800282220971366,
"loss": 0.7546,
"step": 221
},
{
"epoch": 0.34232845026985353,
"grad_norm": 0.028564658015966415,
"learning_rate": 0.00018788562695366495,
"loss": 0.8267,
"step": 222
},
{
"epoch": 0.3438704703161141,
"grad_norm": 0.02490355260670185,
"learning_rate": 0.00018776789895672558,
"loss": 0.6937,
"step": 223
},
{
"epoch": 0.3454124903623747,
"grad_norm": 0.03545152395963669,
"learning_rate": 0.00018764963893253347,
"loss": 0.7001,
"step": 224
},
{
"epoch": 0.3469545104086353,
"grad_norm": 0.02449451945722103,
"learning_rate": 0.00018753084759795158,
"loss": 0.8165,
"step": 225
},
{
"epoch": 0.3484965304548959,
"grad_norm": 0.027851196005940437,
"learning_rate": 0.00018741152567306355,
"loss": 0.7196,
"step": 226
},
{
"epoch": 0.3500385505011565,
"grad_norm": 0.02707446552813053,
"learning_rate": 0.00018729167388116934,
"loss": 0.8375,
"step": 227
},
{
"epoch": 0.3515805705474171,
"grad_norm": 0.02902469038963318,
"learning_rate": 0.00018717129294878074,
"loss": 0.6744,
"step": 228
},
{
"epoch": 0.35312259059367773,
"grad_norm": 0.031537748873233795,
"learning_rate": 0.0001870503836056172,
"loss": 0.7552,
"step": 229
},
{
"epoch": 0.3546646106399383,
"grad_norm": 0.026265786960721016,
"learning_rate": 0.00018692894658460117,
"loss": 0.7551,
"step": 230
},
{
"epoch": 0.3562066306861989,
"grad_norm": 0.02483406662940979,
"learning_rate": 0.0001868069826218538,
"loss": 0.7233,
"step": 231
},
{
"epoch": 0.35774865073245954,
"grad_norm": 0.024800019338726997,
"learning_rate": 0.0001866844924566904,
"loss": 0.7371,
"step": 232
},
{
"epoch": 0.3592906707787201,
"grad_norm": 0.02515244670212269,
"learning_rate": 0.00018656147683161593,
"loss": 0.7621,
"step": 233
},
{
"epoch": 0.3608326908249807,
"grad_norm": 0.02592633105814457,
"learning_rate": 0.00018643793649232072,
"loss": 0.7539,
"step": 234
},
{
"epoch": 0.36237471087124135,
"grad_norm": 0.0275077186524868,
"learning_rate": 0.00018631387218767561,
"loss": 0.6925,
"step": 235
},
{
"epoch": 0.36391673091750193,
"grad_norm": 0.027163324877619743,
"learning_rate": 0.00018618928466972775,
"loss": 0.7867,
"step": 236
},
{
"epoch": 0.3654587509637625,
"grad_norm": 0.026956308633089066,
"learning_rate": 0.0001860641746936957,
"loss": 0.7813,
"step": 237
},
{
"epoch": 0.36700077101002315,
"grad_norm": 0.02884814888238907,
"learning_rate": 0.0001859385430179652,
"loss": 0.7366,
"step": 238
},
{
"epoch": 0.36854279105628374,
"grad_norm": 0.025071945041418076,
"learning_rate": 0.00018581239040408432,
"loss": 0.708,
"step": 239
},
{
"epoch": 0.3700848111025443,
"grad_norm": 0.032973822206258774,
"learning_rate": 0.00018568571761675893,
"loss": 0.6544,
"step": 240
},
{
"epoch": 0.37162683114880496,
"grad_norm": 0.02571587637066841,
"learning_rate": 0.0001855585254238481,
"loss": 0.7633,
"step": 241
},
{
"epoch": 0.37316885119506554,
"grad_norm": 0.027229083701968193,
"learning_rate": 0.00018543081459635935,
"loss": 0.7752,
"step": 242
},
{
"epoch": 0.3747108712413261,
"grad_norm": 0.022508805617690086,
"learning_rate": 0.00018530258590844409,
"loss": 0.6437,
"step": 243
},
{
"epoch": 0.37625289128758677,
"grad_norm": 0.026772433891892433,
"learning_rate": 0.00018517384013739285,
"loss": 0.805,
"step": 244
},
{
"epoch": 0.37779491133384735,
"grad_norm": 0.023964572697877884,
"learning_rate": 0.00018504457806363056,
"loss": 0.7378,
"step": 245
},
{
"epoch": 0.37933693138010793,
"grad_norm": 0.02789299376308918,
"learning_rate": 0.0001849148004707119,
"loss": 0.772,
"step": 246
},
{
"epoch": 0.3808789514263685,
"grad_norm": 0.031168216839432716,
"learning_rate": 0.00018478450814531647,
"loss": 0.8299,
"step": 247
},
{
"epoch": 0.38242097147262916,
"grad_norm": 0.03058604896068573,
"learning_rate": 0.00018465370187724408,
"loss": 0.694,
"step": 248
},
{
"epoch": 0.38396299151888974,
"grad_norm": 0.028347650542855263,
"learning_rate": 0.0001845223824594099,
"loss": 0.7373,
"step": 249
},
{
"epoch": 0.3855050115651503,
"grad_norm": 0.025979626923799515,
"learning_rate": 0.00018439055068783966,
"loss": 0.8036,
"step": 250
},
{
"epoch": 0.38704703161141096,
"grad_norm": 0.029867777600884438,
"learning_rate": 0.0001842582073616649,
"loss": 0.7655,
"step": 251
},
{
"epoch": 0.38858905165767155,
"grad_norm": 0.025117915123701096,
"learning_rate": 0.00018412535328311814,
"loss": 0.7532,
"step": 252
},
{
"epoch": 0.39013107170393213,
"grad_norm": 0.023947982117533684,
"learning_rate": 0.00018399198925752778,
"loss": 0.6967,
"step": 253
},
{
"epoch": 0.39167309175019277,
"grad_norm": 0.025846531614661217,
"learning_rate": 0.00018385811609331352,
"loss": 0.7382,
"step": 254
},
{
"epoch": 0.39321511179645335,
"grad_norm": 0.025034697726368904,
"learning_rate": 0.00018372373460198138,
"loss": 0.7282,
"step": 255
},
{
"epoch": 0.39475713184271394,
"grad_norm": 0.02547437883913517,
"learning_rate": 0.00018358884559811856,
"loss": 0.7447,
"step": 256
},
{
"epoch": 0.3962991518889746,
"grad_norm": 0.027032596990466118,
"learning_rate": 0.0001834534498993888,
"loss": 0.7395,
"step": 257
},
{
"epoch": 0.39784117193523516,
"grad_norm": 0.027110572904348373,
"learning_rate": 0.0001833175483265273,
"loss": 0.7963,
"step": 258
},
{
"epoch": 0.39938319198149574,
"grad_norm": 0.027663685381412506,
"learning_rate": 0.00018318114170333568,
"loss": 0.7893,
"step": 259
},
{
"epoch": 0.4009252120277564,
"grad_norm": 0.027797933667898178,
"learning_rate": 0.00018304423085667714,
"loss": 0.7228,
"step": 260
},
{
"epoch": 0.40246723207401697,
"grad_norm": 0.026281701400876045,
"learning_rate": 0.0001829068166164712,
"loss": 0.749,
"step": 261
},
{
"epoch": 0.40400925212027755,
"grad_norm": 0.024708108976483345,
"learning_rate": 0.00018276889981568906,
"loss": 0.6307,
"step": 262
},
{
"epoch": 0.4055512721665382,
"grad_norm": 0.028213316574692726,
"learning_rate": 0.0001826304812903481,
"loss": 0.8186,
"step": 263
},
{
"epoch": 0.4070932922127988,
"grad_norm": 0.024718405678868294,
"learning_rate": 0.00018249156187950715,
"loss": 0.7077,
"step": 264
},
{
"epoch": 0.40863531225905936,
"grad_norm": 0.02398741990327835,
"learning_rate": 0.00018235214242526125,
"loss": 0.7041,
"step": 265
},
{
"epoch": 0.41017733230531994,
"grad_norm": 0.03176787495613098,
"learning_rate": 0.00018221222377273657,
"loss": 0.7036,
"step": 266
},
{
"epoch": 0.4117193523515806,
"grad_norm": 0.028862686827778816,
"learning_rate": 0.0001820718067700853,
"loss": 0.7947,
"step": 267
},
{
"epoch": 0.41326137239784116,
"grad_norm": 0.026759544387459755,
"learning_rate": 0.0001819308922684805,
"loss": 0.7737,
"step": 268
},
{
"epoch": 0.41480339244410175,
"grad_norm": 0.02719755284488201,
"learning_rate": 0.00018178948112211103,
"loss": 0.7403,
"step": 269
},
{
"epoch": 0.4163454124903624,
"grad_norm": 0.024756524711847305,
"learning_rate": 0.0001816475741881761,
"loss": 0.6994,
"step": 270
},
{
"epoch": 0.41788743253662297,
"grad_norm": 0.03232420235872269,
"learning_rate": 0.00018150517232688049,
"loss": 0.7866,
"step": 271
},
{
"epoch": 0.41942945258288356,
"grad_norm": 0.027607185766100883,
"learning_rate": 0.00018136227640142894,
"loss": 0.7905,
"step": 272
},
{
"epoch": 0.4209714726291442,
"grad_norm": 0.024344706907868385,
"learning_rate": 0.00018121888727802113,
"loss": 0.7408,
"step": 273
},
{
"epoch": 0.4225134926754048,
"grad_norm": 0.025088010355830193,
"learning_rate": 0.0001810750058258464,
"loss": 0.737,
"step": 274
},
{
"epoch": 0.42405551272166536,
"grad_norm": 0.023952683433890343,
"learning_rate": 0.00018093063291707847,
"loss": 0.7764,
"step": 275
},
{
"epoch": 0.425597532767926,
"grad_norm": 0.0288414116948843,
"learning_rate": 0.00018078576942687008,
"loss": 0.7035,
"step": 276
},
{
"epoch": 0.4271395528141866,
"grad_norm": 0.02681080810725689,
"learning_rate": 0.0001806404162333479,
"loss": 0.739,
"step": 277
},
{
"epoch": 0.42868157286044717,
"grad_norm": 0.0266602523624897,
"learning_rate": 0.0001804945742176069,
"loss": 0.7213,
"step": 278
},
{
"epoch": 0.4302235929067078,
"grad_norm": 0.025282425805926323,
"learning_rate": 0.00018034824426370523,
"loss": 0.6807,
"step": 279
},
{
"epoch": 0.4317656129529684,
"grad_norm": 0.025683747604489326,
"learning_rate": 0.00018020142725865888,
"loss": 0.7283,
"step": 280
},
{
"epoch": 0.433307632999229,
"grad_norm": 0.024966144934296608,
"learning_rate": 0.00018005412409243606,
"loss": 0.7096,
"step": 281
},
{
"epoch": 0.4348496530454896,
"grad_norm": 0.027953188866376877,
"learning_rate": 0.00017990633565795208,
"loss": 0.8148,
"step": 282
},
{
"epoch": 0.4363916730917502,
"grad_norm": 0.02772989496588707,
"learning_rate": 0.00017975806285106387,
"loss": 0.8568,
"step": 283
},
{
"epoch": 0.4379336931380108,
"grad_norm": 0.028020409867167473,
"learning_rate": 0.00017960930657056438,
"loss": 0.6732,
"step": 284
},
{
"epoch": 0.4394757131842714,
"grad_norm": 0.025754399597644806,
"learning_rate": 0.00017946006771817733,
"loss": 0.7238,
"step": 285
},
{
"epoch": 0.441017733230532,
"grad_norm": 0.030171813443303108,
"learning_rate": 0.00017931034719855166,
"loss": 0.7493,
"step": 286
},
{
"epoch": 0.4425597532767926,
"grad_norm": 0.026995845139026642,
"learning_rate": 0.00017916014591925605,
"loss": 0.6118,
"step": 287
},
{
"epoch": 0.4441017733230532,
"grad_norm": 0.03541433438658714,
"learning_rate": 0.00017900946479077346,
"loss": 0.7243,
"step": 288
},
{
"epoch": 0.4456437933693138,
"grad_norm": 0.029751230031251907,
"learning_rate": 0.00017885830472649553,
"loss": 0.7081,
"step": 289
},
{
"epoch": 0.4471858134155744,
"grad_norm": 0.022569075226783752,
"learning_rate": 0.00017870666664271707,
"loss": 0.6488,
"step": 290
},
{
"epoch": 0.448727833461835,
"grad_norm": 0.03262341767549515,
"learning_rate": 0.00017855455145863062,
"loss": 0.7626,
"step": 291
},
{
"epoch": 0.4502698535080956,
"grad_norm": 0.02811555750668049,
"learning_rate": 0.0001784019600963207,
"loss": 0.7485,
"step": 292
},
{
"epoch": 0.4518118735543562,
"grad_norm": 0.02504836954176426,
"learning_rate": 0.00017824889348075837,
"loss": 0.7636,
"step": 293
},
{
"epoch": 0.4533538936006168,
"grad_norm": 0.02362634427845478,
"learning_rate": 0.00017809535253979547,
"loss": 0.6915,
"step": 294
},
{
"epoch": 0.4548959136468774,
"grad_norm": 0.029891418293118477,
"learning_rate": 0.00017794133820415916,
"loss": 0.7686,
"step": 295
},
{
"epoch": 0.456437933693138,
"grad_norm": 0.02471439354121685,
"learning_rate": 0.0001777868514074462,
"loss": 0.6693,
"step": 296
},
{
"epoch": 0.4579799537393986,
"grad_norm": 0.025612330064177513,
"learning_rate": 0.00017763189308611722,
"loss": 0.7364,
"step": 297
},
{
"epoch": 0.45952197378565923,
"grad_norm": 0.026865236461162567,
"learning_rate": 0.00017747646417949113,
"loss": 0.7445,
"step": 298
},
{
"epoch": 0.4610639938319198,
"grad_norm": 0.024343574419617653,
"learning_rate": 0.00017732056562973954,
"loss": 0.6271,
"step": 299
},
{
"epoch": 0.4626060138781804,
"grad_norm": 0.02818606235086918,
"learning_rate": 0.00017716419838188077,
"loss": 0.7753,
"step": 300
},
{
"epoch": 0.46414803392444104,
"grad_norm": 0.026821713894605637,
"learning_rate": 0.00017700736338377435,
"loss": 0.6976,
"step": 301
},
{
"epoch": 0.4656900539707016,
"grad_norm": 0.025784511119127274,
"learning_rate": 0.00017685006158611516,
"loss": 0.7062,
"step": 302
},
{
"epoch": 0.4672320740169622,
"grad_norm": 0.028515879064798355,
"learning_rate": 0.00017669229394242766,
"loss": 0.7909,
"step": 303
},
{
"epoch": 0.46877409406322285,
"grad_norm": 0.024095451459288597,
"learning_rate": 0.0001765340614090603,
"loss": 0.7535,
"step": 304
},
{
"epoch": 0.47031611410948343,
"grad_norm": 0.025953758507966995,
"learning_rate": 0.0001763753649451794,
"loss": 0.7623,
"step": 305
},
{
"epoch": 0.471858134155744,
"grad_norm": 0.03294069692492485,
"learning_rate": 0.00017621620551276366,
"loss": 0.8946,
"step": 306
},
{
"epoch": 0.47340015420200465,
"grad_norm": 0.028394997119903564,
"learning_rate": 0.00017605658407659808,
"loss": 0.7251,
"step": 307
},
{
"epoch": 0.47494217424826524,
"grad_norm": 0.025346368551254272,
"learning_rate": 0.00017589650160426828,
"loss": 0.7074,
"step": 308
},
{
"epoch": 0.4764841942945258,
"grad_norm": 0.025906400755047798,
"learning_rate": 0.0001757359590661545,
"loss": 0.6472,
"step": 309
},
{
"epoch": 0.4780262143407864,
"grad_norm": 0.02889554388821125,
"learning_rate": 0.00017557495743542585,
"loss": 0.715,
"step": 310
},
{
"epoch": 0.47956823438704704,
"grad_norm": 0.029205597937107086,
"learning_rate": 0.00017541349768803428,
"loss": 0.698,
"step": 311
},
{
"epoch": 0.4811102544333076,
"grad_norm": 0.02610400691628456,
"learning_rate": 0.0001752515808027088,
"loss": 0.7073,
"step": 312
},
{
"epoch": 0.4826522744795682,
"grad_norm": 0.023945793509483337,
"learning_rate": 0.00017508920776094944,
"loss": 0.6865,
"step": 313
},
{
"epoch": 0.48419429452582885,
"grad_norm": 0.028426503762602806,
"learning_rate": 0.0001749263795470213,
"loss": 0.7264,
"step": 314
},
{
"epoch": 0.48573631457208943,
"grad_norm": 0.02865850180387497,
"learning_rate": 0.0001747630971479487,
"loss": 0.7204,
"step": 315
},
{
"epoch": 0.48727833461835,
"grad_norm": 0.027321334928274155,
"learning_rate": 0.00017459936155350908,
"loss": 0.7491,
"step": 316
},
{
"epoch": 0.48882035466461066,
"grad_norm": 0.02754514105618,
"learning_rate": 0.00017443517375622704,
"loss": 0.7567,
"step": 317
},
{
"epoch": 0.49036237471087124,
"grad_norm": 0.028822382912039757,
"learning_rate": 0.00017427053475136826,
"loss": 0.7559,
"step": 318
},
{
"epoch": 0.4919043947571318,
"grad_norm": 0.03181014209985733,
"learning_rate": 0.00017410544553693365,
"loss": 0.7704,
"step": 319
},
{
"epoch": 0.49344641480339246,
"grad_norm": 0.023862695321440697,
"learning_rate": 0.00017393990711365312,
"loss": 0.6085,
"step": 320
},
{
"epoch": 0.49498843484965305,
"grad_norm": 0.02703220769762993,
"learning_rate": 0.00017377392048497953,
"loss": 0.6979,
"step": 321
},
{
"epoch": 0.49653045489591363,
"grad_norm": 0.025343257933855057,
"learning_rate": 0.00017360748665708268,
"loss": 0.7287,
"step": 322
},
{
"epoch": 0.49807247494217427,
"grad_norm": 0.02830134704709053,
"learning_rate": 0.00017344060663884324,
"loss": 0.8054,
"step": 323
},
{
"epoch": 0.49961449498843485,
"grad_norm": 0.025809939950704575,
"learning_rate": 0.00017327328144184646,
"loss": 0.704,
"step": 324
}
],
"logging_steps": 1,
"max_steps": 1296,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 324,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.348585504514507e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}